/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES 256
#define RX_PULL_LEN 128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN 512

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest;
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
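
/*
 * Worked example (illustrative only): on a kernel with 4 KB pages,
 * MAX_SKB_FRAGS is typically 16 + 1 = 17, so the expression above
 * evaluates to 1 + DIV_ROUND_UP((3 * 17) / 2 + (17 & 1), 8)
 * = 1 + DIV_ROUND_UP(26, 8) = 1 + 4 = 5 descriptors.
 */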

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
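
/*
 * Worked example (illustrative only): assuming a typical 2-byte packet
 * shift and a 64-byte Free List alignment -- both really come from the
 * SGE configuration -- FL_MTU_SMALL_BUFSIZE() evaluates to
 * ALIGN(2 + 14 + 4 + 1500, 64) = ALIGN(1520, 64) = 1536 bytes.
 */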

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK 1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
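
#if 0	/* Illustrative sketch only, not compiled: how the low bits of
	 * rx_sw_desc.dma_addr carry state.  The buffer-size index lives in
	 * bits 0..3 and RX_UNMAPPED_BUF is bit 4, so for a mapped large-page
	 * buffer:
	 */
	sd->dma_addr = mapping | RX_LARGE_PG_BUF;  /* mapped, size index 1 */
	sd->dma_addr |= RX_UNMAPPED_BUF;           /* now marked unmapped */
	/* get_buf_addr(sd) still returns the original mapping, and
	 * is_buf_mapped(sd) now returns false.
	 */
#endif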

/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
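
#if 0	/* Illustrative caller sketch only, not compiled: map_skb() expects
	 * an address array with room for the linear head plus one entry per
	 * fragment, which is exactly how t4_eth_xmit() below sizes its
	 * local array:
	 */
	dma_addr_t addr[MAX_SKB_FRAGS + 1];

	if (unlikely(map_skb(adap->pdev_dev, skb, addr) < 0))
		goto out_free;	/* nothing is left mapped on failure */
#endif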

#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adap: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adap: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
					bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}

static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG_ON(1);
	}

	return buf_size;
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.  The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
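
/*
 * Worked example (illustrative only): the Free List doorbell is rung in
 * units of 8 buffers.  If a refill adds 37 buffers, pend_cred becomes 37,
 * the doorbell write advertises 37 / 8 = 4 units (32 buffers), and
 * 37 & 7 = 5 buffers stay pending until a later refill tops them up.
 */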

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 * refill_fl - refill an SGE Rx buffer ring
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for the allocations
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.  If afterwards the queue is
 * found critically low, mark it as starving in the bitmap of starving FLs.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		q->low++;
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the PCI device's core device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @node: preferred node for memory allocations
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
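
#if 0	/* Illustrative caller sketch only, not compiled: allocating a Free
	 * List ring returns the HW descriptor ring and fills in its bus
	 * address and the SW state array in one call, along the lines of:
	 */
	fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
			      sizeof(struct rx_sw_desc), &fl->addr,
			      &fl->sdesc, s->stat_len,
			      dev_to_node(adap->pdev_dev));
	if (!fl->desc)
		return -ENOMEM;
#endif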

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries
	 * (3 flits for every pair of the remaining N); and finally the
	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
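
/*
 * Worked example (illustrative only): for n = 3 SGL entries the function
 * computes, with n-- leaving 2: (3 * 2) / 2 + (2 & 1) + 2 = 5 flits --
 * two flits for the DSGL header/Length0/Address0 and three more for the
 * remaining { Length1, Length2, Address1, Address2 } group.
 */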

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data.  The return value corresponds to the headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	int hdrlen = skb_shinfo(skb)->gso_size ?
		     sizeof(struct cpl_tx_pkt_lso_core) : 0;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @skb: the packet
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a scatter/gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	return flits_to_desc(calc_tx_flits(skb));
}
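
/*
 * Worked example (illustrative only, assuming the usual 16-byte
 * fw_eth_tx_pkt_wr and cpl_tx_pkt_core layouts): a non-GSO packet too big
 * to inline, with a linear head and two page fragments, needs
 * sgl_len(2 + 1) = 5 flits for the SGL plus (16 + 16) / 8 = 4 flits of
 * WR + CPL headers, i.e. 9 flits, which flits_to_desc() rounds up to
 * DIV_ROUND_UP(9, 8) = 2 Tx descriptors.
 */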

/**
 * write_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into skb main-body data to include in the SGL
 * @addr: the list of bus addresses for the SGL elements
 *
 * Generates a gather list for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a Tx descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}

/* This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space.  For a coalesced WR, the SGE fetches the data from its FIFO
 * instead of from the host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

/**
 * ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		spin_lock_irqsave(&q->db_lock, flags);
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     QID_V(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
		spin_unlock_irqrestore(&q->db_lock, flags);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && q->bar2_qid == 0) {
			int index = (q->pidx
				     ? (q->pidx - 1)
				     : (q->size - 1));
			u64 *wr = (u64 *)&q->desc[index];

			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
		}

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}

/**
 * inline_tx_skb - inline a packet's data into Tx descriptors
 * @skb: the packet
 * @q: the Tx queue where the packet will be inlined
 * @pos: starting position in the Tx queue where to inline the packet
 *
 * Inline a packet's contents directly into Tx descriptors, starting at
 * the given position within the Tx DMA ring.
 * Most of the complexity of this operation is dealing with wrap arounds
 * in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
			  void *pos)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}

static void *inline_tx_skb_header(const struct sk_buff *skb,
				  const struct sge_txq *q, void *pos,
				  int length)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(length <= left)) {
		memcpy(pos, skb->data, length);
		pos += length;
	} else {
		memcpy(pos, skb->data, left);
		memcpy(q->desc, skb->data + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

#ifdef CONFIG_CHELSIO_T4_FCOE
static inline int
cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
		  const struct port_info *pi, u64 *cntrl)
{
	const struct cxgb_fcoe *fcoe = &pi->fcoe;

	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
		return 0;

	if (skb->protocol != htons(ETH_P_FCOE))
		return 0;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);

	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));

	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
		return -ENOTSUPP;

	/* FC CRC offload */
	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
		 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
		 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
		 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
		 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
	return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */

/**
 * t4_eth_xmit - add a packet to an Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid, ctrl0;
	u64 cntrl, *end;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	bool immediate = false;
	int len, max_pkt_len;
#ifdef CONFIG_CHELSIO_T4_FCOE
	int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;
	if (skb_vlan_tag_present(skb))
		max_pkt_len += VLAN_HLEN;
	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		goto out_free;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	reclaim_completed_tx(adap, &q->q, true);
	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

#ifdef CONFIG_CHELSIO_T4_FCOE
	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
	if (unlikely(err == -ENOTSUPP))
		goto out_free;
#endif /* CONFIG_CHELSIO_T4_FCOE */

	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb))
		immediate = true;

	if (!immediate &&
	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	end = (u64 *)wr + flits;

	len = immediate ? skb->len : 0;
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso *lso = (void *)wr;
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		len += sizeof(*lso);
		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN_V(len));
		lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
					LSO_IPV6_V(v6) |
					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->c.ipid_ofst = htons(0);
		lso->c.mss = htons(ssi->gso_size);
		lso->c.seqno_offset = htonl(0);
		if (is_t4(adap->params.chip))
			lso->c.len = htonl(skb->len);
		else
			lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		len += sizeof(*cpl);
		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN_V(len));
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adap->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			q->tx_cso++;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
		if (skb->protocol == htons(ETH_P_FCOE))
			cntrl |= TXPKT_VLAN_V(
				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
#ifdef CONFIG_CHELSIO_T4_DCB
	if (is_t4(adap->params.chip))
		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
	else
		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
#endif
	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (immediate) {
		inline_tx_skb(skb, &q->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		int last_desc;

		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
			  addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	}

	txq_advance(&q->q, ndesc);

	ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}

/**
 * is_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_CTRL_WR_LEN;
}

/**
 * ctrlq_check_stop - check if a control queue is full and should stop
 * @q: the queue
 * @wr: most recent WR written to the queue
 *
 * Check if a control queue has become full and should be stopped.
 * We clean up control queue descriptors very lazily, only when we run
 * out of room.  If the queue is still full after reclaiming any completed
 * descriptors we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
		q->q.stops++;
		q->full = 1;
	}
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		skb->priority = ndesc;                  /* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ctrlq - restart a suspended control queue
 * @data: the control queue to restart
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;     /* previously saved */

		written += ndesc;
		/* Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		txq_advance(&q->q, ndesc);
		spin_unlock(&q->sendq.lock);

		inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {       /* suspended anew */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		if (written > 16) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb:	if (written)
		ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}

/**
 * t4_mgmt_tx - send a management message
 * @adap: the adapter
 * @skb: the packet containing the management message
 *
 * Send a management message through control queue 0.
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return ret;
}

/**
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN;
}

/**
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
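
/*
 * Worked example (illustrative only): an offload WR whose pre-built
 * headers end 64 bytes into the linear data and which carries one page
 * fragment plus tail data beyond the headers needs 64 / 8 = 8 header
 * flits + sgl_len(2) = 4 flits, 12 flits in total; an immediate 96-byte
 * WR would simply need DIV_ROUND_UP(96, 8) = 12 flits.
 */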

/**
 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 * @q: the queue to stop
 *
 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 * inability to map packets.  A periodic timer attempts to restart
 * queues so marked.
 */
static void txq_stop_maperr(struct sge_ofld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}

/**
 * ofldtxq_stop - stop an offload Tx queue that has become full
 * @q: the queue to stop
 * @skb: the packet causing the queue to become full
 *
 * Stops an offload Tx queue that has become full and modifies the packet
 * being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;

	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
	q->q.stops++;
	q->full = 1;
}

/**
 * service_ofldq - service/restart a suspended offload queue
 * @q: the offload queue
 *
 * Services an offload Tx queue by moving packets from its Pending Send
 * Queue to the Hardware TX ring.  The function starts and ends with the
 * Send Queue locked, but drops the lock while putting the skb at the
 * head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
 * allows more skbs to be added to the Send Queue by other threads.
 * The packet being processed at the head of the Pending Send Queue is
 * left on the queue in case we experience DMA Mapping errors, etc.
 * and need to give up and restart later.
 *
 * service_ofldq() can be thought of as a task which opportunistically
 * uses other threads' execution contexts.  We use the Offload Queue
 * boolean "service_ofldq_running" to make sure that only one instance
 * is ever running at a time ...
 */
static void service_ofldq(struct sge_ofld_txq *q)
{
	u64 *pos, *before, *end;
	int credits;
	struct sk_buff *skb;
	struct sge_txq *txq;
	unsigned int left;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	/* If another thread is currently in service_ofldq() processing the
	 * Pending Send Queue then there's nothing to do.  Otherwise, flag
	 * that we're doing the work and continue.  Examining/modifying
	 * the Offload Queue boolean "service_ofldq_running" must be done
	 * while holding the Pending Send Queue Lock.
	 */
	if (q->service_ofldq_running)
		return;
	q->service_ofldq_running = true;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/* We drop the lock while we're working with the skb at the
		 * head of the Pending Send Queue.  This allows more skbs to
		 * be added to the Pending Send Queue while we're working on
		 * this one.  We don't need to lock to guard the TX Ring
		 * updates because only one thread of execution is ever
		 * allowed into service_ofldq() at a time.
		 */
		spin_unlock(&q->sendq.lock);

		reclaim_completed_tx(q->adap, &q->q, false);

		flits = skb->priority;                /* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, skb);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			inline_tx_skb(skb, &q->q, pos);
		else if (map_skb(q->adap->pdev_dev, skb,
				 (dma_addr_t *)skb->head)) {
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			/* The WR headers may not fit within one descriptor.
			 * So we need to deal with wrap-around here.
			 */
			before = (u64 *)pos;
			end = (u64 *)pos + flits;
			txq = &q->q;
			pos = (void *)inline_tx_skb_header(skb, &q->q,
							   (void *)pos,
							   hdr_len);
			if (before > (u64 *)pos) {
				left = (u8 *)end - (u8 *)txq->stat;
				end = (void *)txq->desc + left;
			}

			/* If current position is already at the end of the
			 * ofld queue, reset the current to point to
			 * start of the queue and update the end ptr as well.
			 */
			if (pos == (u64 *)txq->stat) {
				left = (u8 *)end - (u8 *)txq->stat;
				end = (void *)txq->desc + left;
				pos = (void *)txq->desc;
			}

			write_sgl(skb, &q->q, (void *)pos,
				  end, hdr_len,
				  (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		if (unlikely(written > 32)) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		/* Reacquire the Pending Send Queue Lock so we can unlink the
		 * skb we've just successfully transferred to the TX Ring and
		 * loop for the next skb which may be at the head of the
		 * Pending Send Queue.
		 */
		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		if (is_ofld_imm(skb))
			kfree_skb(skb);
	}
	if (likely(written))
		ring_tx_db(q->adap, &q->q, written);

	/* Indicate that no thread is processing the Pending Send Queue
	 * currently.
	 */
	q->service_ofldq_running = false;
}

/**
 * ofld_xmit - send a packet through an offload queue
 * @q: the Tx offload queue
 * @skb: the packet
 *
 * Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
	spin_lock(&q->sendq.lock);

	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
	 * that results in this new skb being the only one on the queue, start
	 * servicing it.  If there are other skbs already on the list, then
	 * either the queue is currently being processed or it's been stopped
	 * for some reason and it'll be restarted at a later time.  Restart
	 * paths are triggered by events like experiencing a DMA Mapping Error
	 * or filling the Hardware TX Ring.
	 */
	__skb_queue_tail(&q->sendq, skb);
	if (q->sendq.qlen == 1)
		service_ofldq(q);

	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ofldq - restart a suspended offload queue
 * @data: the offload queue to restart
 *
 * Resumes transmission on a suspended Tx offload queue.
 */
static void restart_ofldq(unsigned long data)
{
	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;

	spin_lock(&q->sendq.lock);
	q->full = 0;            /* the queue actually is completely empty now */
	service_ofldq(q);
	spin_unlock(&q->sendq.lock);
}

/**
 * skb_txq - return the Tx queue an offload packet should use
 * @skb: the packet
 *
 * Returns the Tx queue an offload packet should use as indicated by bits
 * 1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 * @skb: the packet
 *
 * Returns whether an offload packet should use an OFLD or a CTRL
 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
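
#if 0	/* Illustrative sketch only, not compiled: senders encode the
	 * selection consumed by skb_txq()/is_ctrl_pkt() above by packing
	 * the queue index into bits 1-15 and the control flag into bit 0;
	 * cxgb4's set_wr_txq() helper does this packing:
	 */
	skb_set_queue_mapping(skb, (queue << 1) | is_ctrl);
#endif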
1769
1770 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1771 {
1772 unsigned int idx = skb_txq(skb);
1773
1774 if (unlikely(is_ctrl_pkt(skb))) {
1775 /* Single ctrl queue is a requirement for LE workaround path */
1776 if (adap->tids.nsftids)
1777 idx = 0;
1778 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1779 }
1780 return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1781 }
1782
1783 /**
1784 * t4_ofld_send - send an offload packet
1785 * @adap: the adapter
1786 * @skb: the packet
1787 *
1788 * Sends an offload packet. We use the packet queue_mapping to select the
1789 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1790 * should be sent as regular or control, bits 1-15 select the queue.
1791 */
1792 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1793 {
1794 int ret;
1795
1796 local_bh_disable();
1797 ret = ofld_send(adap, skb);
1798 local_bh_enable();
1799 return ret;
1800 }
1801
1802 /**
1803 * cxgb4_ofld_send - send an offload packet
1804 * @dev: the net device
1805 * @skb: the packet
1806 *
1807 * Sends an offload packet. This is an exported version of @t4_ofld_send,
1808 * intended for ULDs.
1809 */
1810 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1811 {
1812 return t4_ofld_send(netdev2adap(dev), skb);
1813 }
1814 EXPORT_SYMBOL(cxgb4_ofld_send);
1815
1816 static inline void copy_frags(struct sk_buff *skb,
1817 const struct pkt_gl *gl, unsigned int offset)
1818 {
1819 int i;
1820
1821 /* usually there's just one frag */
1822 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1823 gl->frags[0].offset + offset,
1824 gl->frags[0].size - offset);
1825 skb_shinfo(skb)->nr_frags = gl->nfrags;
1826 for (i = 1; i < gl->nfrags; i++)
1827 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1828 gl->frags[i].offset,
1829 gl->frags[i].size);
1830
1831 	/* take a reference on the last page since we don't own it */
1832 get_page(gl->frags[gl->nfrags - 1].page);
1833 }
1834
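/* Note on the ownership protocol above: the last page of a gather list
 * remains owned (and DMA-mapped) by the free list, which is why
 * copy_frags() takes an extra reference on it and why t4_pktgl_free()
 * below releases every page except the last.
 */
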
1835 /**
1836 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1837 * @gl: the gather list
1838 * @skb_len: size of sk_buff main body if it carries fragments
1839 * @pull_len: amount of data to move to the sk_buff's main body
1840 *
1841 * Builds an sk_buff from the given packet gather list. Returns the
1842 * sk_buff or %NULL if sk_buff allocation failed.
1843 */
1844 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1845 unsigned int skb_len, unsigned int pull_len)
1846 {
1847 struct sk_buff *skb;
1848
1849 /*
1850 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1851 	 * size, which is expected since buffers are at least PAGE_SIZE bytes.
1852 	 * In that case packets up to RX_COPY_THRES fit in a single fragment.
1853 */
1854 if (gl->tot_len <= RX_COPY_THRES) {
1855 skb = dev_alloc_skb(gl->tot_len);
1856 if (unlikely(!skb))
1857 goto out;
1858 __skb_put(skb, gl->tot_len);
1859 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1860 } else {
1861 skb = dev_alloc_skb(skb_len);
1862 if (unlikely(!skb))
1863 goto out;
1864 __skb_put(skb, pull_len);
1865 skb_copy_to_linear_data(skb, gl->va, pull_len);
1866
1867 copy_frags(skb, gl, pull_len);
1868 skb->len = gl->tot_len;
1869 skb->data_len = skb->len - pull_len;
1870 skb->truesize += skb->data_len;
1871 }
1872 out: return skb;
1873 }
1874 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1875
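/* Worked example of the copy-break scheme above: with the RX_COPY_THRES
 * and RX_PULL_LEN values used by t4_ethrx_handler() below, a 200-byte
 * packet is copied in its entirety into a freshly allocated linear skb,
 * while a 1500-byte packet gets a 128-byte linear header area plus the
 * remaining 1372 bytes attached as page fragments by copy_frags().
 */
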
1876 /**
1877 * t4_pktgl_free - free a packet gather list
1878 * @gl: the gather list
1879 *
1880 * Releases the pages of a packet gather list. We do not own the last
1881 * page on the list and do not free it.
1882 */
1883 static void t4_pktgl_free(const struct pkt_gl *gl)
1884 {
1885 int n;
1886 const struct page_frag *p;
1887
1888 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1889 put_page(p->page);
1890 }
1891
1892 /*
1893 * Process an MPS trace packet. Give it an unused protocol number so it won't
1894 * be delivered to anyone and send it to the stack for capture.
1895 */
1896 static noinline int handle_trace_pkt(struct adapter *adap,
1897 const struct pkt_gl *gl)
1898 {
1899 struct sk_buff *skb;
1900
1901 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1902 if (unlikely(!skb)) {
1903 t4_pktgl_free(gl);
1904 return 0;
1905 }
1906
1907 if (is_t4(adap->params.chip))
1908 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1909 else
1910 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1911
1912 skb_reset_mac_header(skb);
1913 skb->protocol = htons(0xffff);
1914 skb->dev = adap->port[0];
1915 netif_receive_skb(skb);
1916 return 0;
1917 }
1918
1919 /**
1920 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
1921 * @adap: the adapter
1922 * @hwtstamps: time stamp structure to update
1923  * @sgetstamp: 60-bit ingress queue entry timestamp
1924  *
1925  * Every ingress queue entry carries a 60-bit timestamp expressed in
1926  * Core Clock ticks; convert it to ktime_t and store it in @hwtstamps.
1927  */
1928 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
1929 struct skb_shared_hwtstamps *hwtstamps,
1930 u64 sgetstamp)
1931 {
1932 u64 ns;
1933 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
1934
1935 ns = div_u64(tmp, adap->params.vpd.cclk);
1936
1937 memset(hwtstamps, 0, sizeof(*hwtstamps));
1938 hwtstamps->hwtstamp = ns_to_ktime(ns);
1939 }
1940
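/* Worked example of the conversion above (a sketch which assumes, as the
 * 10^6 scaling suggests, that vpd.cclk holds the Core Clock in kHz): with
 * cclk = 250000 kHz (250 MHz) each tick is 1000000 / 250000 = 4 ns, so a
 * sgetstamp of 1000 ticks yields (1000 * 1000000 + 125000) / 250000 =
 * 4000 ns.  The "+ cclk / 2" term makes div_u64() round to the nearest
 * nanosecond instead of truncating.
 */
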
1941 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1942 const struct cpl_rx_pkt *pkt)
1943 {
1944 struct adapter *adapter = rxq->rspq.adap;
1945 struct sge *s = &adapter->sge;
1946 struct port_info *pi;
1947 int ret;
1948 struct sk_buff *skb;
1949
1950 skb = napi_get_frags(&rxq->rspq.napi);
1951 if (unlikely(!skb)) {
1952 t4_pktgl_free(gl);
1953 rxq->stats.rx_drops++;
1954 return;
1955 }
1956
1957 copy_frags(skb, gl, s->pktshift);
1958 skb->len = gl->tot_len - s->pktshift;
1959 skb->data_len = skb->len;
1960 skb->truesize += skb->data_len;
1961 skb->ip_summed = CHECKSUM_UNNECESSARY;
1962 skb_record_rx_queue(skb, rxq->rspq.idx);
1963 pi = netdev_priv(skb->dev);
1964 if (pi->rxtstamp)
1965 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
1966 gl->sgetstamp);
1967 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1968 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1969 PKT_HASH_TYPE_L3);
1970
1971 if (unlikely(pkt->vlan_ex)) {
1972 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1973 rxq->stats.vlan_ex++;
1974 }
1975 ret = napi_gro_frags(&rxq->rspq.napi);
1976 if (ret == GRO_HELD)
1977 rxq->stats.lro_pkts++;
1978 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1979 rxq->stats.lro_merged++;
1980 rxq->stats.pkts++;
1981 rxq->stats.rx_cso++;
1982 }
1983
1984 /**
1985 * t4_ethrx_handler - process an ingress ethernet packet
1986 * @q: the response queue that received the packet
1987 * @rsp: the response queue descriptor holding the RX_PKT message
1988 * @si: the gather list of packet fragments
1989 *
1990 * Process an ingress ethernet packet and deliver it to the stack.
1991 */
1992 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1993 const struct pkt_gl *si)
1994 {
1995 bool csum_ok;
1996 struct sk_buff *skb;
1997 const struct cpl_rx_pkt *pkt;
1998 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1999 struct sge *s = &q->adap->sge;
2000 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
2001 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
2002 struct port_info *pi;
2003
2004 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
2005 return handle_trace_pkt(q->adap, si);
2006
2007 pkt = (const struct cpl_rx_pkt *)rsp;
2008 csum_ok = pkt->csum_calc && !pkt->err_vec &&
2009 (q->netdev->features & NETIF_F_RXCSUM);
2010 if ((pkt->l2info & htonl(RXF_TCP_F)) &&
2011 !(cxgb_poll_busy_polling(q)) &&
2012 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
2013 do_gro(rxq, si, pkt);
2014 return 0;
2015 }
2016
2017 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
2018 if (unlikely(!skb)) {
2019 t4_pktgl_free(si);
2020 rxq->stats.rx_drops++;
2021 return 0;
2022 }
2023
2024 __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
2025 skb->protocol = eth_type_trans(skb, q->netdev);
2026 skb_record_rx_queue(skb, q->idx);
2027 if (skb->dev->features & NETIF_F_RXHASH)
2028 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2029 PKT_HASH_TYPE_L3);
2030
2031 rxq->stats.pkts++;
2032
2033 pi = netdev_priv(skb->dev);
2034 if (pi->rxtstamp)
2035 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
2036 si->sgetstamp);
2037 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
2038 if (!pkt->ip_frag) {
2039 skb->ip_summed = CHECKSUM_UNNECESSARY;
2040 rxq->stats.rx_cso++;
2041 } else if (pkt->l2info & htonl(RXF_IP_F)) {
2042 __sum16 c = (__force __sum16)pkt->csum;
2043 skb->csum = csum_unfold(c);
2044 skb->ip_summed = CHECKSUM_COMPLETE;
2045 rxq->stats.rx_cso++;
2046 }
2047 } else {
2048 skb_checksum_none_assert(skb);
2049 #ifdef CONFIG_CHELSIO_T4_FCOE
2050 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
2051 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
2052
2053 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
2054 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
2055 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
2056 if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
2057 skb->ip_summed = CHECKSUM_UNNECESSARY;
2058 }
2059 }
2060
2061 #undef CPL_RX_PKT_FLAGS
2062 #endif /* CONFIG_CHELSIO_T4_FCOE */
2063 }
2064
2065 if (unlikely(pkt->vlan_ex)) {
2066 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2067 rxq->stats.vlan_ex++;
2068 }
2069 skb_mark_napi_id(skb, &q->napi);
2070 netif_receive_skb(skb);
2071 return 0;
2072 }
2073
2074 /**
2075 * restore_rx_bufs - put back a packet's Rx buffers
2076 * @si: the packet gather list
2077 * @q: the SGE free list
2078 * @frags: number of FL buffers to restore
2079 *
2080 * Puts back on an FL the Rx buffers associated with @si. The buffers
2081  * have already been unmapped and are left unmapped; we mark them so to
2082 * prevent further unmapping attempts.
2083 *
2084 * This function undoes a series of @unmap_rx_buf calls when we find out
2085  * that the current packet can't be processed right away after all and we
2086 * need to come back to it later. This is a very rare event and there's
2087 * no effort to make this particularly efficient.
2088 */
2089 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
2090 int frags)
2091 {
2092 struct rx_sw_desc *d;
2093
2094 while (frags--) {
2095 if (q->cidx == 0)
2096 q->cidx = q->size - 1;
2097 else
2098 q->cidx--;
2099 d = &q->sdesc[q->cidx];
2100 d->page = si->frags[frags].page;
2101 d->dma_addr |= RX_UNMAPPED_BUF;
2102 q->avail++;
2103 }
2104 }
2105
2106 /**
2107 * is_new_response - check if a response is newly written
2108 * @r: the response descriptor
2109 * @q: the response queue
2110 *
2111 * Returns true if a response descriptor contains a yet unprocessed
2112 * response.
2113 */
2114 static inline bool is_new_response(const struct rsp_ctrl *r,
2115 const struct sge_rspq *q)
2116 {
2117 return (r->type_gen >> RSPD_GEN_S) == q->gen;
2118 }
2119
2120 /**
2121 * rspq_next - advance to the next entry in a response queue
2122 * @q: the queue
2123 *
2124 * Updates the state of a response queue to advance it to the next entry.
2125 */
2126 static inline void rspq_next(struct sge_rspq *q)
2127 {
2128 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
2129 if (unlikely(++q->cidx == q->size)) {
2130 q->cidx = 0;
2131 q->gen ^= 1;
2132 q->cur_desc = q->desc;
2133 }
2134 }
2135
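/* Sketch of the generation-bit handshake implemented by the two helpers
 * above (illustrative pseudo-code, not driver code): hardware stamps each
 * response descriptor's GEN field with its current generation and the
 * driver flips q->gen every time it wraps past the end of the ring, so
 * stale descriptors from the previous pass fail the is_new_response()
 * check until hardware overwrites them:
 *
 *	while (is_new_response(rc, q)) {
 *		consume(rc);		// consume() is hypothetical
 *		rspq_next(q);		// flips q->gen on wrap-around
 *	}
 */
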
2136 /**
2137 * process_responses - process responses from an SGE response queue
2138 * @q: the ingress queue to process
2139 * @budget: how many responses can be processed in this round
2140 *
2141 * Process responses from an SGE response queue up to the supplied budget.
2142 * Responses include received packets as well as control messages from FW
2143 * or HW.
2144 *
2145 * Additionally choose the interrupt holdoff time for the next interrupt
2146  * on this queue. If the system is under memory shortage, use a fairly
2147 * long delay to help recovery.
2148 */
2149 static int process_responses(struct sge_rspq *q, int budget)
2150 {
2151 int ret, rsp_type;
2152 int budget_left = budget;
2153 const struct rsp_ctrl *rc;
2154 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2155 struct adapter *adapter = q->adap;
2156 struct sge *s = &adapter->sge;
2157
2158 while (likely(budget_left)) {
2159 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2160 if (!is_new_response(rc, q)) {
2161 if (q->flush_handler)
2162 q->flush_handler(q);
2163 break;
2164 }
2165
2166 dma_rmb();
2167 rsp_type = RSPD_TYPE_G(rc->type_gen);
2168 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
2169 struct page_frag *fp;
2170 struct pkt_gl si;
2171 const struct rx_sw_desc *rsd;
2172 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
2173
2174 if (len & RSPD_NEWBUF_F) {
2175 if (likely(q->offset > 0)) {
2176 free_rx_bufs(q->adap, &rxq->fl, 1);
2177 q->offset = 0;
2178 }
2179 len = RSPD_LEN_G(len);
2180 }
2181 si.tot_len = len;
2182
2183 /* gather packet fragments */
2184 for (frags = 0, fp = si.frags; ; frags++, fp++) {
2185 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
2186 bufsz = get_buf_size(adapter, rsd);
2187 fp->page = rsd->page;
2188 fp->offset = q->offset;
2189 fp->size = min(bufsz, len);
2190 len -= fp->size;
2191 if (!len)
2192 break;
2193 unmap_rx_buf(q->adap, &rxq->fl);
2194 }
2195
2196 si.sgetstamp = SGE_TIMESTAMP_G(
2197 be64_to_cpu(rc->last_flit));
2198 /*
2199 * Last buffer remains mapped so explicitly make it
2200 * coherent for CPU access.
2201 */
2202 dma_sync_single_for_cpu(q->adap->pdev_dev,
2203 get_buf_addr(rsd),
2204 fp->size, DMA_FROM_DEVICE);
2205
2206 si.va = page_address(si.frags[0].page) +
2207 si.frags[0].offset;
2208 prefetch(si.va);
2209
2210 si.nfrags = frags + 1;
2211 ret = q->handler(q, q->cur_desc, &si);
2212 if (likely(ret == 0))
2213 q->offset += ALIGN(fp->size, s->fl_align);
2214 else
2215 restore_rx_bufs(&si, &rxq->fl, frags);
2216 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
2217 ret = q->handler(q, q->cur_desc, NULL);
2218 } else {
2219 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
2220 }
2221
2222 if (unlikely(ret)) {
2223 /* couldn't process descriptor, back off for recovery */
2224 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
2225 break;
2226 }
2227
2228 rspq_next(q);
2229 budget_left--;
2230 }
2231
2232 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
2233 __refill_fl(q->adap, &rxq->fl);
2234 return budget - budget_left;
2235 }
2236
2237 #ifdef CONFIG_NET_RX_BUSY_POLL
2238 int cxgb_busy_poll(struct napi_struct *napi)
2239 {
2240 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2241 unsigned int params, work_done;
2242 u32 val;
2243
2244 if (!cxgb_poll_lock_poll(q))
2245 return LL_FLUSH_BUSY;
2246
2247 work_done = process_responses(q, 4);
2248 params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
2249 q->next_intr_params = params;
2250 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2251
2252 /* If we don't have access to the new User GTS (T5+), use the old
2253 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2254 */
2255 if (unlikely(!q->bar2_addr))
2256 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2257 val | INGRESSQID_V((u32)q->cntxt_id));
2258 else {
2259 writel(val | INGRESSQID_V(q->bar2_qid),
2260 q->bar2_addr + SGE_UDB_GTS);
2261 wmb();
2262 }
2263
2264 cxgb_poll_unlock_poll(q);
2265 return work_done;
2266 }
2267 #endif /* CONFIG_NET_RX_BUSY_POLL */
2268
2269 /**
2270 * napi_rx_handler - the NAPI handler for Rx processing
2271 * @napi: the napi instance
2272 * @budget: how many packets we can process in this round
2273 *
2274 * Handler for new data events when using NAPI. This does not need any
2275 * locking or protection from interrupts as data interrupts are off at
2276 * this point and other adapter interrupts do not interfere (the latter
2277  * is not a concern at all with MSI-X as non-data interrupts then have
2278 * a separate handler).
2279 */
2280 static int napi_rx_handler(struct napi_struct *napi, int budget)
2281 {
2282 unsigned int params;
2283 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2284 int work_done;
2285 u32 val;
2286
2287 if (!cxgb_poll_lock_napi(q))
2288 return budget;
2289
2290 work_done = process_responses(q, budget);
2291 if (likely(work_done < budget)) {
2292 int timer_index;
2293
2294 napi_complete_done(napi, work_done);
2295 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
2296
2297 if (q->adaptive_rx) {
2298 if (work_done > max(timer_pkt_quota[timer_index],
2299 MIN_NAPI_WORK))
2300 timer_index = (timer_index + 1);
2301 else
2302 timer_index = timer_index - 1;
2303
2304 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2305 q->next_intr_params =
2306 QINTR_TIMER_IDX_V(timer_index) |
2307 QINTR_CNT_EN_V(0);
2308 params = q->next_intr_params;
2309 } else {
2310 params = q->next_intr_params;
2311 q->next_intr_params = q->intr_params;
2312 }
2313 } else
2314 params = QINTR_TIMER_IDX_V(7);
2315
2316 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2317
2318 /* If we don't have access to the new User GTS (T5+), use the old
2319 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2320 */
2321 if (unlikely(q->bar2_addr == NULL)) {
2322 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2323 val | INGRESSQID_V((u32)q->cntxt_id));
2324 } else {
2325 writel(val | INGRESSQID_V(q->bar2_qid),
2326 q->bar2_addr + SGE_UDB_GTS);
2327 wmb();
2328 }
2329 cxgb_poll_unlock_napi(q);
2330 return work_done;
2331 }
2332
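/* Worked example of the adaptive holdoff logic above (illustrative
 * numbers, and assuming the SGE timer values are programmed in ascending
 * order, as is conventional): with a current timer index of 2, a poll
 * that consumes more packets than max(timer_pkt_quota[2], MIN_NAPI_WORK)
 * bumps the index to 3 (a longer holdoff, favouring batching), while a
 * quieter poll drops it to 1; clamp() keeps the index within
 * [0, SGE_TIMERREGS - 1].
 */
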
2333 /*
2334 * The MSI-X interrupt handler for an SGE response queue.
2335 */
2336 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2337 {
2338 struct sge_rspq *q = cookie;
2339
2340 napi_schedule(&q->napi);
2341 return IRQ_HANDLED;
2342 }
2343
2344 /*
2345 * Process the indirect interrupt entries in the interrupt queue and kick off
2346 * NAPI for each queue that has generated an entry.
2347 */
2348 static unsigned int process_intrq(struct adapter *adap)
2349 {
2350 unsigned int credits;
2351 const struct rsp_ctrl *rc;
2352 struct sge_rspq *q = &adap->sge.intrq;
2353 u32 val;
2354
2355 spin_lock(&adap->sge.intrq_lock);
2356 for (credits = 0; ; credits++) {
2357 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2358 if (!is_new_response(rc, q))
2359 break;
2360
2361 dma_rmb();
2362 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
2363 unsigned int qid = ntohl(rc->pldbuflen_qid);
2364
2365 qid -= adap->sge.ingr_start;
2366 napi_schedule(&adap->sge.ingr_map[qid]->napi);
2367 }
2368
2369 rspq_next(q);
2370 }
2371
2372 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2373
2374 /* If we don't have access to the new User GTS (T5+), use the old
2375 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2376 */
2377 if (unlikely(q->bar2_addr == NULL)) {
2378 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2379 val | INGRESSQID_V(q->cntxt_id));
2380 } else {
2381 writel(val | INGRESSQID_V(q->bar2_qid),
2382 q->bar2_addr + SGE_UDB_GTS);
2383 wmb();
2384 }
2385 spin_unlock(&adap->sge.intrq_lock);
2386 return credits;
2387 }
2388
2389 /*
2390 * The MSI interrupt handler, which handles data events from SGE response queues
2391  * as well as error and other async events, since they all use the same MSI vector.
2392 */
2393 static irqreturn_t t4_intr_msi(int irq, void *cookie)
2394 {
2395 struct adapter *adap = cookie;
2396
2397 if (adap->flags & MASTER_PF)
2398 t4_slow_intr_handler(adap);
2399 process_intrq(adap);
2400 return IRQ_HANDLED;
2401 }
2402
2403 /*
2404 * Interrupt handler for legacy INTx interrupts.
2405 * Handles data events from SGE response queues as well as error and other
2406  * async events, since they all use the same interrupt line.
2407 */
2408 static irqreturn_t t4_intr_intx(int irq, void *cookie)
2409 {
2410 struct adapter *adap = cookie;
2411
2412 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2413 if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
2414 process_intrq(adap))
2415 return IRQ_HANDLED;
2416 return IRQ_NONE; /* probably shared interrupt */
2417 }
2418
2419 /**
2420 * t4_intr_handler - select the top-level interrupt handler
2421 * @adap: the adapter
2422 *
2423 * Selects the top-level interrupt handler based on the type of interrupts
2424 * (MSI-X, MSI, or INTx).
2425 */
2426 irq_handler_t t4_intr_handler(struct adapter *adap)
2427 {
2428 if (adap->flags & USING_MSIX)
2429 return t4_sge_intr_msix;
2430 if (adap->flags & USING_MSI)
2431 return t4_intr_msi;
2432 return t4_intr_intx;
2433 }
2434
2435 static void sge_rx_timer_cb(unsigned long data)
2436 {
2437 unsigned long m;
2438 unsigned int i;
2439 struct adapter *adap = (struct adapter *)data;
2440 struct sge *s = &adap->sge;
2441
2442 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2443 for (m = s->starving_fl[i]; m; m &= m - 1) {
2444 struct sge_eth_rxq *rxq;
2445 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2446 struct sge_fl *fl = s->egr_map[id];
2447
2448 clear_bit(id, s->starving_fl);
2449 smp_mb__after_atomic();
2450
2451 if (fl_starving(adap, fl)) {
2452 rxq = container_of(fl, struct sge_eth_rxq, fl);
2453 if (napi_reschedule(&rxq->rspq.napi))
2454 fl->starving++;
2455 else
2456 set_bit(id, s->starving_fl);
2457 }
2458 }
2459 /* The remainder of the SGE RX Timer Callback routine is dedicated to
2460 * global Master PF activities like checking for chip ingress stalls,
2461 * etc.
2462 */
2463 if (!(adap->flags & MASTER_PF))
2464 goto done;
2465
2466 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
2467
2468 done:
2469 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2470 }
2471
2472 static void sge_tx_timer_cb(unsigned long data)
2473 {
2474 unsigned long m;
2475 unsigned int i, budget;
2476 struct adapter *adap = (struct adapter *)data;
2477 struct sge *s = &adap->sge;
2478
2479 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2480 for (m = s->txq_maperr[i]; m; m &= m - 1) {
2481 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2482 struct sge_ofld_txq *txq = s->egr_map[id];
2483
2484 clear_bit(id, s->txq_maperr);
2485 tasklet_schedule(&txq->qresume_tsk);
2486 }
2487
2488 budget = MAX_TIMER_TX_RECLAIM;
2489 i = s->ethtxq_rover;
2490 do {
2491 struct sge_eth_txq *q = &s->ethtxq[i];
2492
2493 if (q->q.in_use &&
2494 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2495 __netif_tx_trylock(q->txq)) {
2496 int avail = reclaimable(&q->q);
2497
2498 if (avail) {
2499 if (avail > budget)
2500 avail = budget;
2501
2502 free_tx_desc(adap, &q->q, avail, true);
2503 q->q.in_use -= avail;
2504 budget -= avail;
2505 }
2506 __netif_tx_unlock(q->txq);
2507 }
2508
2509 if (++i >= s->ethqsets)
2510 i = 0;
2511 } while (budget && i != s->ethtxq_rover);
2512 s->ethtxq_rover = i;
2513 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2514 }
2515
2516 /**
2517 * bar2_address - return the BAR2 address for an SGE Queue's Registers
2518 * @adapter: the adapter
2519 * @qid: the SGE Queue ID
2520 * @qtype: the SGE Queue Type (Egress or Ingress)
2521 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2522 *
2523 * Returns the BAR2 address for the SGE Queue Registers associated with
2524 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
2525 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2526 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2527 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
2528 */
2529 static void __iomem *bar2_address(struct adapter *adapter,
2530 unsigned int qid,
2531 enum t4_bar2_qtype qtype,
2532 unsigned int *pbar2_qid)
2533 {
2534 u64 bar2_qoffset;
2535 int ret;
2536
2537 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
2538 &bar2_qoffset, pbar2_qid);
2539 if (ret)
2540 return NULL;
2541
2542 return adapter->bar2 + bar2_qoffset;
2543 }
2544
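/* Worked example of the @intr_idx encoding documented below: to attach
 * the new ingress queue to the queue with absolute qid 17 instead of to
 * an interrupt vector, a caller passes intr_idx = -(17 + 1) = -18; the
 * FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : -intr_idx - 1)
 * term in t4_sge_alloc_rxq() then recovers the original qid of 17.
 */
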
2545 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
2546 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
2547 */
2548 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2549 struct net_device *dev, int intr_idx,
2550 struct sge_fl *fl, rspq_handler_t hnd,
2551 rspq_flush_handler_t flush_hnd, int cong)
2552 {
2553 int ret, flsz = 0;
2554 struct fw_iq_cmd c;
2555 struct sge *s = &adap->sge;
2556 struct port_info *pi = netdev_priv(dev);
2557
2558 /* Size needs to be multiple of 16, including status entry. */
2559 iq->size = roundup(iq->size, 16);
2560
2561 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2562 &iq->phys_addr, NULL, 0,
2563 dev_to_node(adap->pdev_dev));
2564 if (!iq->desc)
2565 return -ENOMEM;
2566
2567 memset(&c, 0, sizeof(c));
2568 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
2569 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2570 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
2571 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
2572 FW_LEN16(c));
2573 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2574 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
2575 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
2576 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
2577 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2578 -intr_idx - 1));
2579 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
2580 FW_IQ_CMD_IQGTSMODE_F |
2581 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
2582 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
2583 c.iqsize = htons(iq->size);
2584 c.iqaddr = cpu_to_be64(iq->phys_addr);
2585 if (cong >= 0)
2586 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
2587
2588 if (fl) {
2589 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
2590
2591 /* Allocate the ring for the hardware free list (with space
2592 * for its status page) along with the associated software
2593 * descriptor ring. The free list size needs to be a multiple
2594 * of the Egress Queue Unit and at least 2 Egress Units larger
2595 		 * than the SGE's Egress Congestion Threshold
2596 * (fl_starve_thres - 1).
2597 */
2598 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
2599 fl->size = s->fl_starve_thres - 1 + 2 * 8;
2600 fl->size = roundup(fl->size, 8);
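		/* Worked sizing example (illustrative numbers): with a
		 * fl_starve_thres of 64 the minimum becomes 64 - 1 + 2*8 =
		 * 79 entries, which roundup(fl->size, 8) then pads to 80 so
		 * the ring remains a whole number of 8-entry Egress Queue
		 * Units.
		 */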
2601 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2602 sizeof(struct rx_sw_desc), &fl->addr,
2603 &fl->sdesc, s->stat_len,
2604 dev_to_node(adap->pdev_dev));
2605 if (!fl->desc)
2606 goto fl_nomem;
2607
2608 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2609 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
2610 FW_IQ_CMD_FL0FETCHRO_F |
2611 FW_IQ_CMD_FL0DATARO_F |
2612 FW_IQ_CMD_FL0PADEN_F);
2613 if (cong >= 0)
2614 c.iqns_to_fl0congen |=
2615 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
2616 FW_IQ_CMD_FL0CONGCIF_F |
2617 FW_IQ_CMD_FL0CONGEN_F);
2618 /* In T6, for egress queue type FL there is internal overhead
2619 		 * of 16B for the header going into the FLM module. Hence the maximum
2620 * allowed burst size is 448 bytes. For T4/T5, the hardware
2621 * doesn't coalesce fetch requests if more than 64 bytes of
2622 * Free List pointers are provided, so we use a 128-byte Fetch
2623 * Burst Minimum there (T6 implements coalescing so we can use
2624 * the smaller 64-byte value there).
2625 */
2626 c.fl0dcaen_to_fl0cidxfthresh =
2627 htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
2628 FETCHBURSTMIN_128B_X :
2629 FETCHBURSTMIN_64B_X) |
2630 FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
2631 FETCHBURSTMAX_512B_X :
2632 FETCHBURSTMAX_256B_X));
2633 c.fl0size = htons(flsz);
2634 c.fl0addr = cpu_to_be64(fl->addr);
2635 }
2636
2637 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2638 if (ret)
2639 goto err;
2640
2641 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2642 iq->cur_desc = iq->desc;
2643 iq->cidx = 0;
2644 iq->gen = 1;
2645 iq->next_intr_params = iq->intr_params;
2646 iq->cntxt_id = ntohs(c.iqid);
2647 iq->abs_id = ntohs(c.physiqid);
2648 iq->bar2_addr = bar2_address(adap,
2649 iq->cntxt_id,
2650 T4_BAR2_QTYPE_INGRESS,
2651 &iq->bar2_qid);
2652 iq->size--; /* subtract status entry */
2653 iq->netdev = dev;
2654 iq->handler = hnd;
2655 iq->flush_handler = flush_hnd;
2656
2657 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
2658 skb_queue_head_init(&iq->lro_mgr.lroq);
2659
2660 /* set offset to -1 to distinguish ingress queues without FL */
2661 iq->offset = fl ? 0 : -1;
2662
2663 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2664
2665 if (fl) {
2666 fl->cntxt_id = ntohs(c.fl0id);
2667 fl->avail = fl->pend_cred = 0;
2668 fl->pidx = fl->cidx = 0;
2669 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2670 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2671
2672 /* Note, we must initialize the BAR2 Free List User Doorbell
2673 * information before refilling the Free List!
2674 */
2675 fl->bar2_addr = bar2_address(adap,
2676 fl->cntxt_id,
2677 T4_BAR2_QTYPE_EGRESS,
2678 &fl->bar2_qid);
2679 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2680 }
2681
2682 /* For T5 and later we attempt to set up the Congestion Manager values
2683 * of the new RX Ethernet Queue. This should really be handled by
2684 * firmware because it's more complex than any host driver wants to
2685 * get involved with and it's different per chip and this is almost
2686 * certainly wrong. Firmware would be wrong as well, but it would be
2687 * a lot easier to fix in one place ... For now we do something very
2688 * simple (and hopefully less wrong).
2689 */
2690 if (!is_t4(adap->params.chip) && cong >= 0) {
2691 u32 param, val, ch_map = 0;
2692 int i;
2693 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
2694
2695 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2696 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
2697 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
2698 if (cong == 0) {
2699 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
2700 } else {
2701 val =
2702 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
2703 for (i = 0; i < 4; i++) {
2704 if (cong & (1 << i))
2705 ch_map |= 1 << (i << cng_ch_bits_log);
2706 }
2707 val |= CONMCTXT_CNGCHMAP_V(ch_map);
2708 }
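		/* Worked example of the channel map built above (illustrative
		 * values): with cong = 0x5 (channels 0 and 2) and a
		 * cng_ch_bits_log of 2, each channel occupies four bits of
		 * the map, so ch_map = (1 << 0) | (1 << 8) = 0x101.
		 */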
2709 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
2710 &param, &val);
2711 if (ret)
2712 dev_warn(adap->pdev_dev, "Failed to set Congestion"
2713 " Manager Context for Ingress Queue %d: %d\n",
2714 iq->cntxt_id, -ret);
2715 }
2716
2717 return 0;
2718
2719 fl_nomem:
2720 ret = -ENOMEM;
2721 err:
2722 if (iq->desc) {
2723 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2724 iq->desc, iq->phys_addr);
2725 iq->desc = NULL;
2726 }
2727 if (fl && fl->desc) {
2728 kfree(fl->sdesc);
2729 fl->sdesc = NULL;
2730 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2731 fl->desc, fl->addr);
2732 fl->desc = NULL;
2733 }
2734 return ret;
2735 }
2736
2737 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2738 {
2739 q->cntxt_id = id;
2740 q->bar2_addr = bar2_address(adap,
2741 q->cntxt_id,
2742 T4_BAR2_QTYPE_EGRESS,
2743 &q->bar2_qid);
2744 q->in_use = 0;
2745 q->cidx = q->pidx = 0;
2746 q->stops = q->restarts = 0;
2747 q->stat = (void *)&q->desc[q->size];
2748 spin_lock_init(&q->db_lock);
2749 adap->sge.egr_map[id - adap->sge.egr_start] = q;
2750 }
2751
2752 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2753 struct net_device *dev, struct netdev_queue *netdevq,
2754 unsigned int iqid)
2755 {
2756 int ret, nentries;
2757 struct fw_eq_eth_cmd c;
2758 struct sge *s = &adap->sge;
2759 struct port_info *pi = netdev_priv(dev);
2760
2761 /* Add status entries */
2762 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2763
2764 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2765 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2766 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2767 netdev_queue_numa_node_read(netdevq));
2768 if (!txq->q.desc)
2769 return -ENOMEM;
2770
2771 memset(&c, 0, sizeof(c));
2772 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
2773 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2774 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
2775 FW_EQ_ETH_CMD_VFN_V(0));
2776 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
2777 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
2778 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2779 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2780 c.fetchszm_to_iqid =
2781 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2782 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
2783 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
2784 c.dcaen_to_eqsize =
2785 htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2786 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2787 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2788 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2789 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2790
2791 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2792 if (ret) {
2793 kfree(txq->q.sdesc);
2794 txq->q.sdesc = NULL;
2795 dma_free_coherent(adap->pdev_dev,
2796 nentries * sizeof(struct tx_desc),
2797 txq->q.desc, txq->q.phys_addr);
2798 txq->q.desc = NULL;
2799 return ret;
2800 }
2801
2802 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
2803 txq->txq = netdevq;
2804 txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2805 txq->mapping_err = 0;
2806 return 0;
2807 }
2808
2809 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2810 struct net_device *dev, unsigned int iqid,
2811 unsigned int cmplqid)
2812 {
2813 int ret, nentries;
2814 struct fw_eq_ctrl_cmd c;
2815 struct sge *s = &adap->sge;
2816 struct port_info *pi = netdev_priv(dev);
2817
2818 /* Add status entries */
2819 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2820
2821 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2822 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2823 NULL, 0, dev_to_node(adap->pdev_dev));
2824 if (!txq->q.desc)
2825 return -ENOMEM;
2826
2827 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
2828 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2829 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
2830 FW_EQ_CTRL_CMD_VFN_V(0));
2831 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
2832 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
2833 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
2834 c.physeqid_pkd = htonl(0);
2835 c.fetchszm_to_iqid =
2836 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2837 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
2838 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
2839 c.dcaen_to_eqsize =
2840 htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2841 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2842 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2843 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
2844 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2845
2846 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2847 if (ret) {
2848 dma_free_coherent(adap->pdev_dev,
2849 nentries * sizeof(struct tx_desc),
2850 txq->q.desc, txq->q.phys_addr);
2851 txq->q.desc = NULL;
2852 return ret;
2853 }
2854
2855 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
2856 txq->adap = adap;
2857 skb_queue_head_init(&txq->sendq);
2858 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2859 txq->full = 0;
2860 return 0;
2861 }
2862
2863 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2864 struct net_device *dev, unsigned int iqid)
2865 {
2866 int ret, nentries;
2867 struct fw_eq_ofld_cmd c;
2868 struct sge *s = &adap->sge;
2869 struct port_info *pi = netdev_priv(dev);
2870
2871 /* Add status entries */
2872 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2873
2874 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2875 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2876 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2877 NUMA_NO_NODE);
2878 if (!txq->q.desc)
2879 return -ENOMEM;
2880
2881 memset(&c, 0, sizeof(c));
2882 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
2883 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2884 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
2885 FW_EQ_OFLD_CMD_VFN_V(0));
2886 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
2887 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
2888 c.fetchszm_to_iqid =
2889 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2890 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
2891 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
2892 c.dcaen_to_eqsize =
2893 htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2894 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2895 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2896 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
2897 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2898
2899 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2900 if (ret) {
2901 kfree(txq->q.sdesc);
2902 txq->q.sdesc = NULL;
2903 dma_free_coherent(adap->pdev_dev,
2904 nentries * sizeof(struct tx_desc),
2905 txq->q.desc, txq->q.phys_addr);
2906 txq->q.desc = NULL;
2907 return ret;
2908 }
2909
2910 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
2911 txq->adap = adap;
2912 skb_queue_head_init(&txq->sendq);
2913 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2914 txq->full = 0;
2915 txq->mapping_err = 0;
2916 return 0;
2917 }
2918
2919 static void free_txq(struct adapter *adap, struct sge_txq *q)
2920 {
2921 struct sge *s = &adap->sge;
2922
2923 dma_free_coherent(adap->pdev_dev,
2924 q->size * sizeof(struct tx_desc) + s->stat_len,
2925 q->desc, q->phys_addr);
2926 q->cntxt_id = 0;
2927 q->sdesc = NULL;
2928 q->desc = NULL;
2929 }
2930
2931 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2932 struct sge_fl *fl)
2933 {
2934 struct sge *s = &adap->sge;
2935 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2936
2937 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2938 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
2939 rq->cntxt_id, fl_id, 0xffff);
2940 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2941 rq->desc, rq->phys_addr);
2942 napi_hash_del(&rq->napi);
2943 netif_napi_del(&rq->napi);
2944 rq->netdev = NULL;
2945 rq->cntxt_id = rq->abs_id = 0;
2946 rq->desc = NULL;
2947
2948 if (fl) {
2949 free_rx_bufs(adap, fl, fl->avail);
2950 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2951 fl->desc, fl->addr);
2952 kfree(fl->sdesc);
2953 fl->sdesc = NULL;
2954 fl->cntxt_id = 0;
2955 fl->desc = NULL;
2956 }
2957 }
2958
2959 /**
2960 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
2961 * @adap: the adapter
2962 * @n: number of queues
2963 * @q: pointer to first queue
2964 *
2965 * Release the resources of a consecutive block of offload Rx queues.
2966 */
2967 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
2968 {
2969 for ( ; n; n--, q++)
2970 if (q->rspq.desc)
2971 free_rspq_fl(adap, &q->rspq,
2972 q->fl.size ? &q->fl : NULL);
2973 }
2974
2975 /**
2976 * t4_free_sge_resources - free SGE resources
2977 * @adap: the adapter
2978 *
2979 * Frees resources used by the SGE queue sets.
2980 */
2981 void t4_free_sge_resources(struct adapter *adap)
2982 {
2983 int i;
2984 struct sge_eth_rxq *eq = adap->sge.ethrxq;
2985 struct sge_eth_txq *etq = adap->sge.ethtxq;
2986
2987 /* clean up Ethernet Tx/Rx queues */
2988 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2989 if (eq->rspq.desc)
2990 free_rspq_fl(adap, &eq->rspq,
2991 eq->fl.size ? &eq->fl : NULL);
2992 if (etq->q.desc) {
2993 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
2994 etq->q.cntxt_id);
2995 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2996 kfree(etq->q.sdesc);
2997 free_txq(adap, &etq->q);
2998 }
2999 }
3000
3001 /* clean up RDMA and iSCSI Rx queues */
3002 t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
3003 t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
3004 t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
3005 t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
3006
3007 /* clean up offload Tx queues */
3008 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
3009 struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
3010
3011 if (q->q.desc) {
3012 tasklet_kill(&q->qresume_tsk);
3013 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
3014 q->q.cntxt_id);
3015 free_tx_desc(adap, &q->q, q->q.in_use, false);
3016 kfree(q->q.sdesc);
3017 __skb_queue_purge(&q->sendq);
3018 free_txq(adap, &q->q);
3019 }
3020 }
3021
3022 /* clean up control Tx queues */
3023 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
3024 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
3025
3026 if (cq->q.desc) {
3027 tasklet_kill(&cq->qresume_tsk);
3028 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
3029 cq->q.cntxt_id);
3030 __skb_queue_purge(&cq->sendq);
3031 free_txq(adap, &cq->q);
3032 }
3033 }
3034
3035 if (adap->sge.fw_evtq.desc)
3036 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
3037
3038 if (adap->sge.intrq.desc)
3039 free_rspq_fl(adap, &adap->sge.intrq, NULL);
3040
3041 /* clear the reverse egress queue map */
3042 memset(adap->sge.egr_map, 0,
3043 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
3044 }
3045
3046 void t4_sge_start(struct adapter *adap)
3047 {
3048 adap->sge.ethtxq_rover = 0;
3049 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
3050 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
3051 }
3052
3053 /**
3054 * t4_sge_stop - disable SGE operation
3055 * @adap: the adapter
3056 *
3057 * Stop tasklets and timers associated with the DMA engine. Note that
3058 * this is effective only if measures have been taken to disable any HW
3059 * events that may restart them.
3060 */
3061 void t4_sge_stop(struct adapter *adap)
3062 {
3063 int i;
3064 struct sge *s = &adap->sge;
3065
3066 if (in_interrupt()) /* actions below require waiting */
3067 return;
3068
3069 if (s->rx_timer.function)
3070 del_timer_sync(&s->rx_timer);
3071 if (s->tx_timer.function)
3072 del_timer_sync(&s->tx_timer);
3073
3074 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
3075 struct sge_ofld_txq *q = &s->ofldtxq[i];
3076
3077 if (q->q.desc)
3078 tasklet_kill(&q->qresume_tsk);
3079 }
3080 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
3081 struct sge_ctrl_txq *cq = &s->ctrlq[i];
3082
3083 if (cq->q.desc)
3084 tasklet_kill(&cq->qresume_tsk);
3085 }
3086 }
3087
3088 /**
3089 * t4_sge_init_soft - grab core SGE values needed by SGE code
3090 * @adap: the adapter
3091 *
3092  * Grab the SGE operating parameters that we need to do our job and
3093  * make sure we can live with them.
3094 */
3096 static int t4_sge_init_soft(struct adapter *adap)
3097 {
3098 struct sge *s = &adap->sge;
3099 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
3100 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
3101 u32 ingress_rx_threshold;
3102
3103 /*
3104 * Verify that CPL messages are going to the Ingress Queue for
3105 * process_responses() and that only packet data is going to the
3106 * Free Lists.
3107 */
3108 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
3109 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
3110 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
3111 return -EINVAL;
3112 }
3113
3114 /*
3115 * Validate the Host Buffer Register Array indices that we want to
3116 * use ...
3117 *
3118 * XXX Note that we should really read through the Host Buffer Size
3119 * XXX register array and find the indices of the Buffer Sizes which
3120 * XXX meet our needs!
3121 */
3122 #define READ_FL_BUF(x) \
3123 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
3124
3125 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
3126 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
3127 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
3128 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
3129
3130 /* We only bother using the Large Page logic if the Large Page Buffer
3131 * is larger than our Page Size Buffer.
3132 */
3133 if (fl_large_pg <= fl_small_pg)
3134 fl_large_pg = 0;
3135
3136 #undef READ_FL_BUF
3137
3138 /* The Page Size Buffer must be exactly equal to our Page Size and the
3139 * Large Page Size Buffer should be 0 (per above) or a power of 2.
3140 */
3141 if (fl_small_pg != PAGE_SIZE ||
3142 (fl_large_pg & (fl_large_pg-1)) != 0) {
3143 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
3144 fl_small_pg, fl_large_pg);
3145 return -EINVAL;
3146 }
3147 if (fl_large_pg)
3148 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
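	/* e.g. with 4KB pages (PAGE_SHIFT = 12) and a 64KB Large Page
	 * Buffer this yields fl_pg_order = ilog2(65536) - 12 = 4, i.e.
	 * order-4 page allocations for the large free-list buffers.
	 */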
3149
3150 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
3151 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
3152 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
3153 fl_small_mtu, fl_large_mtu);
3154 return -EINVAL;
3155 }
3156
3157 /*
3158 * Retrieve our RX interrupt holdoff timer values and counter
3159 * threshold values from the SGE parameters.
3160 */
3161 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
3162 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
3163 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
3164 s->timer_val[0] = core_ticks_to_us(adap,
3165 TIMERVALUE0_G(timer_value_0_and_1));
3166 s->timer_val[1] = core_ticks_to_us(adap,
3167 TIMERVALUE1_G(timer_value_0_and_1));
3168 s->timer_val[2] = core_ticks_to_us(adap,
3169 TIMERVALUE2_G(timer_value_2_and_3));
3170 s->timer_val[3] = core_ticks_to_us(adap,
3171 TIMERVALUE3_G(timer_value_2_and_3));
3172 s->timer_val[4] = core_ticks_to_us(adap,
3173 TIMERVALUE4_G(timer_value_4_and_5));
3174 s->timer_val[5] = core_ticks_to_us(adap,
3175 TIMERVALUE5_G(timer_value_4_and_5));
3176
3177 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
3178 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
3179 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
3180 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
3181 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
3182
3183 return 0;
3184 }
3185
3186 /**
3187 * t4_sge_init - initialize SGE
3188 * @adap: the adapter
3189 *
3190 * Perform low-level SGE code initialization needed every time after a
3191 * chip reset.
3192 */
3193 int t4_sge_init(struct adapter *adap)
3194 {
3195 struct sge *s = &adap->sge;
3196 u32 sge_control, sge_conm_ctrl;
3197 int ret, egress_threshold;
3198
3199 /*
3200 * Ingress Padding Boundary and Egress Status Page Size are set up by
3201 * t4_fixup_host_params().
3202 */
3203 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
3204 s->pktshift = PKTSHIFT_G(sge_control);
3205 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
3206
3207 s->fl_align = t4_fl_pkt_align(adap);
3208 ret = t4_sge_init_soft(adap);
3209 if (ret < 0)
3210 return ret;
3211
3212 /*
3213 * A FL with <= fl_starve_thres buffers is starving and a periodic
3214 * timer will attempt to refill it. This needs to be larger than the
3215 * SGE's Egress Congestion Threshold. If it isn't, then we can get
3216 * stuck waiting for new packets while the SGE is waiting for us to
3217 * give it more Free List entries. (Note that the SGE's Egress
3218 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
3219 * there was only a single field to control this. For T5 there's the
3220 * original field which now only applies to Unpacked Mode Free List
3221 * buffers and a new field which only applies to Packed Mode Free List
3222 * buffers.
3223 */
3224 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
3225 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
3226 case CHELSIO_T4:
3227 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
3228 break;
3229 case CHELSIO_T5:
3230 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3231 break;
3232 case CHELSIO_T6:
3233 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3234 break;
3235 default:
3236 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
3237 CHELSIO_CHIP_VERSION(adap->params.chip));
3238 return -EINVAL;
3239 }
3240 s->fl_starve_thres = 2*egress_threshold + 1;
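	/* Worked example (illustrative value): if the chip reports an
	 * Egress Congestion Threshold of 32, i.e. 64 Free List pointers
	 * since the field is in units of 2, then fl_starve_thres = 65 and
	 * any Free List holding 65 or fewer buffers is treated as starving
	 * and refilled from the sge_rx_timer_cb() path above.
	 */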
3241
3242 t4_idma_monitor_init(adap, &s->idma_monitor);
3243
3244 	/* Set up timers used for recurring callbacks to process RX and TX
3245 * administrative tasks.
3246 */
3247 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
3248 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
3249
3250 spin_lock_init(&s->intrq_lock);
3251
3252 return 0;
3253 }