cxgb3 - remove SW Tx credits coalescing
drivers/net/cxgb3/sge.c
/*
 * This file is part of the Chelsio T3 Ethernet driver.
 *
 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536
#define SGE_RX_COPY_THRES 256

#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,		/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	u64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

struct unmap_info {	/* packet unmapping info, overlays skb->cb */
	int sflit;	/* start flit of first SGL entry in Tx descriptor */
	u16 fragidx;	/* first page fragment in current Tx descriptor */
	u16 addr_idx;	/* buffer index of first SGL entry in descriptor */
	u32 len;	/* mapped length of skb main body */
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 *	desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
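
/*
 * Worked example (added comment, not in the original): with
 * SGE_NUM_GENBITS == 2 one flit of each descriptor is reserved for the
 * generation bits, so the first descriptor holds up to 15 flits and each
 * further descriptor adds 14.  Hence flit_desc_map[15] == 1,
 * flit_desc_map[16] == 2, and flit_desc_map[30] == 3, consistent with
 * the formula above assuming WR_FLITS == 15.
 */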

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  Because the
 * result is a compile-time constant, the compiler can optimize away the
 * unmapping code when it is not needed.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}
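
/*
 * Added note (not in the original): when DECLARE_PCI_UNMAP_ADDR expands
 * to nothing, struct dummy is empty and sizeof() evaluates to 0 under
 * GCC's zero-size empty-struct extension, so need_skb_unmap() folds to a
 * compile-time constant and the unmap branches below disappear entirely.
 */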

/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, we keep the information necessary to
 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
 * in the Tx descriptors (the physical addresses of the various data
 * buffers).  The send functions initialize the state in skb->cb so we
 * can unmap the buffers held in the first Tx descriptor here, and we
 * have enough information at this point to update the state for the next
 * Tx descriptor.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct unmap_info *ui = (struct unmap_info *)skb->cb;
	int nfrags, frag_idx, curflit, j = ui->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];

	if (ui->len) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
				 PCI_DMA_TODEVICE);
		ui->len = 0;	/* so we know for next descriptor for this skb */
		j = 1;
	}

	frag_idx = ui->fragidx;
	curflit = ui->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		ui->fragidx = frag_idx;
		ui->addr_idx = j;
		ui->sflit = curflit - WR_FLITS - j;	/* sflit can be -1 */
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_skb_unmap())
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->skb->priority == cidx)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue's lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}

/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 * @q: the Tx queue
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
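
/*
 * Illustration (added comment, hypothetical numbers): with q->size == 1024,
 * a suspended queue becomes eligible for restart once fewer than 512
 * descriptors would remain in use after reclaiming everything the HW has
 * already processed.
 */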

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @rxq: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}

/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @skb: the buffer to add
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
				  struct rx_desc *d, struct rx_sw_desc *sd,
				  unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	sd->skb = skb;
	mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
}
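
/*
 * Note on the wmb() above (added comment): the buffer address must be
 * visible in the descriptor before the generation bits are written,
 * since matching generation bits are the HW's cue that the descriptor
 * is complete and may be fetched.
 */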

/**
 * refill_fl - refill an SGE free-buffer list
 * @adapter: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must ensure that
 * @n does not exceed the queue's capacity.
 */
static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];

	while (n--) {
		struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

		if (!skb)
			break;

		add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
	}

	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
}

/**
 * recycle_rx_buf - recycle a receive buffer
 * @adapter: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @pdev: the PCI device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
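
/*
 * Illustrative call (added comment, hypothetical values): allocate a
 * 512-entry free-list ring together with its per-buffer SW state.
 *
 *	struct rx_sw_desc *sdesc;
 *	dma_addr_t phys;
 *	struct rx_desc *ring = alloc_ring(pdev, 512, sizeof(struct rx_desc),
 *					  sizeof(struct rx_sw_desc), &phys,
 *					  &sdesc);
 */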

/**
 * free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 * @q: the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 * queue set must be quiesced prior to calling this.
 */
void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	if (q->tx_reclaim_timer.function)
		del_timer_sync(&q->tx_reclaim_timer);

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	if (q->netdev)
		q->netdev->atalk_ptr = NULL;

	memset(q, 0, sizeof(*q));
}

/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @qs: the queue set
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}
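
/*
 * Worked example (added comment): an sg_ent packs two buffers (two
 * 32-bit lengths plus two 64-bit addresses, 24 bytes) into 3 flits, and
 * an odd trailing buffer needs 2 more, so sgl_len(3) == 3 + 2 == 5.
 */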

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 * get_packet - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff.  If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself.  If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
						    pci_unmap_addr(sd, dma_addr),
						    len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
						       pci_unmap_addr(sd, dma_addr),
						       len, PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
	      recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres))
		goto recycle;

      use_orig_buf:
	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 * get_imm_packet - return the next ingress packet buffer from a response
 * @resp: the response descriptor containing the packet data
 *
 * Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

/**
 * make_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @sgp: the SGL to populate
 * @start: start address of skb main body data to include in the SGL
 * @len: length of skb main body data to include in the SGL
 * @pdev: the PCI device
 *
 * Generates a scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words.  The caller must size the SGL
 * appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		sgp->len[j] = cpu_to_be32(frag->size);
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

/**
 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 *
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race:
 * the HW may go to sleep just after we checked, but in that case the
 * interrupt handler will detect the outstanding Tx packet and ring the
 * doorbell for us.
 *
 * When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}
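
/*
 * Rationale (added comment, our reading of the 2-genbit scheme): the
 * generation value appears both in the WR header and, via wr_gen2(), in
 * the final flit of the descriptor, so the SGE only treats a descriptor
 * as valid once it has been written in its entirety.
 */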

/**
 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
 * @ndesc: number of Tx descriptors spanned by the SGL
 * @skb: the packet corresponding to the WR
 * @d: first Tx descriptor to be written
 * @pidx: index of above descriptors
 * @q: the SGE Tx queue
 * @sgl: the SGL
 * @flits: number of flits to the start of the SGL in the first descriptor
 * @sgl_flits: the SGL size in flits
 * @gen: the Tx descriptor generation
 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 * Write a work request header and an associated SGL.  If the SGL is
 * small enough to fit into one Tx descriptor it has already been written
 * and we just need to write the WR header.  Otherwise we distribute the
 * SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, unsigned int wr_hi,
			     unsigned int wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		struct unmap_info *ui = (struct unmap_info *)skb->cb;

		ui->fragidx = 0;
		ui->addr_idx = 0;
		ui->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		skb->priority = pidx;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		skb->priority = pidx;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

/**
 * write_tx_pkt_wr - write a TX_PKT work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pi: the egress interface
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @q: the Tx queue
 * @ndesc: number of descriptors the packet will occupy
 * @compl: the value of the COMPL bit to use
 *
 * Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len | 0x80000000);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
		    V_LSO_TCPHDR_WORDS(skb->h.th->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				memcpy(&d->flit[2], skb->data, skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			kfree_skb(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
	if (need_skb_unmap())
		((struct unmap_info *)skb->cb)->len = skb_headlen(skb);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

/**
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	struct sge_txq *q = &qs->txq[TXQ_ETH];

	/*
	 * The chip minimum packet length is 9 octets, but play safe and
	 * reject anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&q->lock);
	reclaim_completed_tx(adap, q);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(TXQ_ETH, &qs->txq_stopped);
			q->stops++;
			dev_err(&adap->pdev->dev,
				"%s: Tx ring %u full while queue awake!\n",
				dev->name, q->cntxt_id & 7);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		q->stops++;
		netif_stop_queue(dev);
		set_bit(TXQ_ETH, &qs->txq_stopped);
#if !USE_GTS
		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_wake_queue(dev);
		}
#endif
	}

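	/*
	 * Assumed reading of the logic below (added comment): q->unacked
	 * counts descriptors written since the last completion request.
	 * Once bit 3 is set, i.e. at least 8 descriptors have gone out,
	 * the WR COMPL bit is set so the SGE returns credits, and the
	 * counter is folded back into the 0-7 range.
	 */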
	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	dev->trans_start = jiffies;
	spin_unlock(&q->lock);

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets, causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A downside is that we lie to socket memory accounting, but the
	 * amount of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets almost always do get freed quickly by
	 * new packets, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 * write_imm - write a packet into a Tx descriptor as immediate data
 * @d: the Tx descriptor to write
 * @skb: the packet
 * @len: the length of packet data to write as immediate data
 * @gen: the generation bit value to write
 *
 * Writes a packet as immediate data into a Tx descriptor.  The packet
 * contains a work request at its beginning.  We must write the packet
 * carefully so the SGE doesn't accidentally read it before it has been
 * written in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	memcpy(&to[1], &from[1], len - sizeof(*from));
	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 * check_desc_avail - check descriptor availability on a send queue
 * @adap: the adapter
 * @q: the send queue
 * @skb: the packet needing the descriptors
 * @ndesc: the number of Tx descriptors needed
 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 * Checks if the requested number of Tx descriptors is available on an
 * SGE send queue.  If the queue is already suspended or not enough
 * descriptors are available, the packet is queued for later transmission.
 * Must be called with the Tx queue locked.
 *
 * Returns 0 if enough descriptors are available, 1 if there aren't
 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the meantime.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
	      addq_exit:
		__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN && !skb->data_len;
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @adap: the adapter
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data in a single Tx
 * descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
      again:
	reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];
	struct adapter *adap = qs->netdev->priv;

	spin_lock(&q->lock);
      again:
	reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
}

/**
 * write_ofld_wr - write an offload work request
 * @adap: the adapter
 * @skb: the packet to send
 * @q: the Tx queue
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @ndesc: number of descriptors the packet will occupy
 *
 * Write an offload work request to send the supplied packet.  The packet
 * data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));

	flits = (skb->h.raw - skb->data) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
			     adap->pdev);
	if (need_skb_unmap())
		((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}

/**
 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given offload
 * packet.  These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;

	if (skb->len <= WR_LEN && cnt == 0)
		return 1;	/* packet fits as immediate data */

	flits = (skb->h.raw - skb->data) / 8;	/* headers */
	if (skb->tail != skb->h.raw)
		cnt++;
	return flits_to_desc(flits + sgl_len(cnt));
}

/**
 * ofld_xmit - send a packet through an offload queue
 * @adap: the adapter
 * @q: the Tx offload queue
 * @skb: the packet
 *
 * Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

	spin_lock(&q->lock);
      again:
	reclaim_completed_tx(adap, q);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (unlikely(ret)) {
		if (ret == 1) {
			skb->priority = ndesc;	/* save for restart */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
	check_ring_tx_db(adap, q);
	return NET_XMIT_SUCCESS;
}

/**
 * restart_offloadq - restart a suspended offload queue
 * @qs: the queue set containing the offload queue
 *
 * Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	struct adapter *adap = qs->netdev->priv;

	spin_lock(&q->lock);
      again:
	reclaim_completed_tx(adap, q);

	while ((skb = skb_peek(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		unsigned int ndesc = skb->priority;

		if (unlikely(q->size - q->in_use < ndesc)) {
			set_bit(TXQ_OFLD, &qs->txq_stopped);
			smp_mb__after_clear_bit();

			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			q->stops++;
			break;
		}

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}
		__skb_unlink(skb, &q->sendq);
		spin_unlock(&q->lock);

		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);

#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/**
 * queue_set - return the queue set a packet should use
 * @skb: the packet
 *
 * Maps a packet to the SGE queue set it should use.  The desired queue
 * set is carried in bits 1-3 in the packet's priority.
 */
static inline int queue_set(const struct sk_buff *skb)
{
	return skb->priority >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 * @skb: the packet
 *
 * Determines whether an offload packet should use an OFLD or a CTRL
 * Tx queue.  This is indicated by bit 0 in the packet's priority.
 */
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->priority & 1;
}

/**
 * t3_offload_tx - send an offload packet
 * @tdev: the offload device to send to
 * @skb: the packet
 *
 * Sends an offload packet.  We use the packet priority to select the
 * appropriate Tx queue as follows: bit 0 indicates whether the packet
 * should be sent as regular or control, bits 1-3 select the queue set.
 */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct adapter *adap = tdev2adap(tdev);
	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}

/**
 * offload_enqueue - add an offload packet to an SGE offload receive queue
 * @q: the SGE response queue
 * @skb: the packet
 *
 * Add a new offload packet to an SGE response queue's offload packet
 * queue.  If the packet is the first on the queue it schedules the RX
 * softirq to process the queue.
 */
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
	skb->next = skb->prev = NULL;
	if (q->rx_tail)
		q->rx_tail->next = skb;
	else {
		struct sge_qset *qs = rspq_to_qset(q);

		if (__netif_rx_schedule_prep(qs->netdev))
			__netif_rx_schedule(qs->netdev);
		q->rx_head = skb;
	}
	q->rx_tail = skb;
}

/**
 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
 * @tdev: the offload device that will be receiving the packets
 * @q: the SGE response queue that assembled the bundle
 * @skbs: the partial bundle
 * @n: the number of packets in the bundle
 *
 * Delivers a (partial) bundle of Rx offload packets to an offload device.
 */
static inline void deliver_partial_bundle(struct t3cdev *tdev,
					  struct sge_rspq *q,
					  struct sk_buff *skbs[], int n)
{
	if (n) {
		q->offload_bundles++;
		tdev->recv(tdev, skbs, n);
	}
}

/**
 * ofld_poll - NAPI handler for offload packets in interrupt mode
 * @dev: the network device doing the polling
 * @budget: polling budget
 *
 * The NAPI handler for offload packets when a response queue is serviced
 * by the hard interrupt handler, i.e., when it's operating in non-polling
 * mode.  Creates small packet batches and sends them through the offload
 * receive handler.  Batches need to be of modest size as we do prefetches
 * on the packets in each.
 */
static int ofld_poll(struct net_device *dev, int *budget)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	struct sge_rspq *q = &qs->rspq;
	int work_done, limit = min(*budget, dev->quota), avail = limit;

	while (avail) {
		struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
		int ngathered;

		spin_lock_irq(&q->lock);
		head = q->rx_head;
		if (!head) {
			work_done = limit - avail;
			*budget -= work_done;
			dev->quota -= work_done;
			__netif_rx_complete(dev);
			spin_unlock_irq(&q->lock);
			return 0;
		}

		tail = q->rx_tail;
		q->rx_head = q->rx_tail = NULL;
		spin_unlock_irq(&q->lock);

		for (ngathered = 0; avail && head; avail--) {
			prefetch(head->data);
			skbs[ngathered] = head;
			head = head->next;
			skbs[ngathered]->next = NULL;
			if (++ngathered == RX_BUNDLE_SIZE) {
				q->offload_bundles++;
				adapter->tdev.recv(&adapter->tdev, skbs,
						   ngathered);
				ngathered = 0;
			}
		}
		if (head) {	/* splice remaining packets back onto Rx queue */
			spin_lock_irq(&q->lock);
			tail->next = q->rx_head;
			if (!q->rx_head)
				q->rx_tail = tail;
			q->rx_head = head;
			spin_unlock_irq(&q->lock);
		}
		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
	}
	work_done = limit - avail;
	*budget -= work_done;
	dev->quota -= work_done;
	return 1;
}

/**
 * rx_offload - process a received offload packet
 * @tdev: the offload device receiving the packet
 * @rq: the response queue that received the packet
 * @skb: the packet
 * @rx_gather: a gather list of packets if we are building a bundle
 * @gather_idx: index of the next available slot in the bundle
 *
 * Process an ingress offload packet and add it to the offload ingress
 * queue.  Returns the index of the next available slot in the bundle.
 */
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
			     struct sk_buff *skb, struct sk_buff *rx_gather[],
			     unsigned int gather_idx)
{
	rq->offload_pkts++;
	skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;

	if (rq->polling) {
		rx_gather[gather_idx++] = skb;
		if (gather_idx == RX_BUNDLE_SIZE) {
			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
			gather_idx = 0;
			rq->offload_bundles++;
		}
	} else
		offload_enqueue(rq, skb);

	return gather_idx;
}

/**
 * restart_tx - check whether to restart suspended Tx queues
 * @qs: the queue set to resume
 *
 * Restarts suspended Tx queues of an SGE queue set if they have enough
 * free resources to resume operation.
 */
static void restart_tx(struct sge_qset *qs)
{
	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
		qs->txq[TXQ_ETH].restarts++;
		if (netif_running(qs->netdev))
			netif_wake_queue(qs->netdev);
	}

	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
		qs->txq[TXQ_OFLD].restarts++;
		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
	}
	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
		qs->txq[TXQ_CTRL].restarts++;
		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
	}
}

/**
 * rx_eth - process an ingress ethernet packet
 * @adap: the adapter
 * @rq: the response queue that received the packet
 * @skb: the packet
 * @pad: amount of padding at the start of the buffer
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 * The padding is 2 if the packet was delivered in an Rx buffer and 0
 * if it was immediate data in a response.
 */
static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
		   struct sk_buff *skb, int pad)
{
	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
	struct port_info *pi;

	rq->eth_pkts++;
	skb_pull(skb, sizeof(*p) + pad);
	skb->dev = adap->port[p->iff];
	skb->dev->last_rx = jiffies;
	skb->protocol = eth_type_trans(skb, skb->dev);
	pi = netdev_priv(skb->dev);
	if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
	    !p->fragment) {
		rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(p->vlan_valid)) {
		struct vlan_group *grp = pi->vlan_grp;

		rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
		if (likely(grp))
			__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
					  rq->polling);
		else
			dev_kfree_skb_any(skb);
	} else if (rq->polling)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}

/**
 * handle_rsp_cntrl_info - handles control information in a response
 * @qs: the queue set corresponding to the response
 * @flags: the response control flags
 *
 * Handles the control information of an SGE response, such as GTS
 * indications and completion credits for the queue set's Tx queues.
 * HW coalesces credits; we don't do any extra SW coalescing.
 */
static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
{
	unsigned int credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ0_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
#endif

	credits = G_RSPD_TXQ0_CR(flags);
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	credits = G_RSPD_TXQ2_CR(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

# if USE_GTS
	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
# endif
	credits = G_RSPD_TXQ1_CR(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}

/**
 * check_ring_db - check if we need to ring any doorbells
 * @adapter: the adapter
 * @qs: the queue set whose Tx queues are to be examined
 * @sleeping: indicates which Tx queue sent GTS
 *
 * Checks if some of a queue set's Tx queues need to ring their doorbells
 * to resume transmission after idling while they still have unprocessed
 * descriptors.
 */
static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
			  unsigned int sleeping)
{
	if (sleeping & F_RSPD_TXQ0_GTS) {
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		if (txq->cleaned + txq->in_use != txq->processed &&
		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
			set_bit(TXQ_RUNNING, &txq->flags);
			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
				     V_EGRCNTX(txq->cntxt_id));
		}
	}

	if (sleeping & F_RSPD_TXQ1_GTS) {
		struct sge_txq *txq = &qs->txq[TXQ_OFLD];

		if (txq->cleaned + txq->in_use != txq->processed &&
		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
			set_bit(TXQ_RUNNING, &txq->flags);
			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
				     V_EGRCNTX(txq->cntxt_id));
		}
	}
}

/**
 * is_new_response - check if a response is newly written
 * @r: the response descriptor
 * @q: the response queue
 *
 * Returns true if a response descriptor contains a yet unprocessed
 * response.
 */
static inline int is_new_response(const struct rsp_desc *r,
				  const struct sge_rspq *q)
{
	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}
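
/*
 * Added note on the generation handshake: HW stamps each response with
 * the current generation bit, and SW flips q->gen every time the
 * response ring wraps (see process_responses() below), so a descriptor
 * whose generation matches q->gen is one we have not consumed yet.
 */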

#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))

/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
#define NOMEM_INTR_DELAY 2500

/**
 * process_responses - process responses from an SGE response queue
 * @adap: the adapter
 * @qs: the queue set to which the response queue belongs
 * @budget: how many responses can be processed in this round
 *
 * Process responses from an SGE response queue up to the supplied budget.
 * Responses include received packets as well as credits and other events
 * for the queues that belong to the response queue's queue set.
 * A negative budget is effectively unlimited.
 *
 * Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue.  If the system is under memory shortage use a fairly
 * long delay to help recovery.
 */
static int process_responses(struct adapter *adap, struct sge_qset *qs,
			     int budget)
{
	struct sge_rspq *q = &qs->rspq;
	struct rsp_desc *r = &q->desc[q->cidx];
	int budget_left = budget;
	unsigned int sleeping = 0;
	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
	int ngathered = 0;

	q->next_holdoff = q->holdoff_tmr;

	while (likely(budget_left && is_new_response(r, q))) {
		int eth, ethpad = 0;
		struct sk_buff *skb = NULL;
		u32 len, flags = ntohl(r->flags);
		u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;

		eth = r->rss_hdr.opcode == CPL_RX_PKT;

		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
			if (!skb)
				goto no_mem;

			memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
			skb->data[0] = CPL_ASYNC_NOTIF;
			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
			q->async_notif++;
		} else if (flags & F_RSPD_IMM_DATA_VALID) {
			skb = get_imm_packet(r);
			if (unlikely(!skb)) {
			      no_mem:
				q->next_holdoff = NOMEM_INTR_DELAY;
				q->nomem++;
				/* consume one credit since we tried */
				budget_left--;
				break;
			}
			q->imm_data++;
		} else if ((len = ntohl(r->len_cq)) != 0) {
			struct sge_fl *fl;

			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
			fl->credits--;
			skb = get_packet(adap, fl, G_RSPD_LEN(len),
					 eth ? SGE_RX_DROP_THRES : 0);
			if (!skb)
				q->rx_drops++;
			else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
				__skb_pull(skb, 2);
			ethpad = 2;
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
		} else
			q->pure_rsps++;

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}

		if (likely(skb != NULL)) {
			if (eth)
				rx_eth(adap, q, skb, ethpad);
			else {
				/* Preserve the RSS info in csum & priority */
				skb->csum = rss_hi;
				skb->priority = rss_lo;
				ngathered = rx_offload(&adap->tdev, q, skb,
						       offload_skbs, ngathered);
			}
		}

		--budget_left;
	}

1827 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1828 if (sleeping)
1829 check_ring_db(adap, qs, sleeping);
1830
1831 smp_mb(); /* commit Tx queue .processed updates */
1832 if (unlikely(qs->txq_stopped != 0))
1833 restart_tx(qs);
1834
1835 budget -= budget_left;
1836 return budget;
1837}
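/*
 * Usage sketch (illustrative): process_responses() returns how many
 * responses it consumed, so a caller can compare that against its quota to
 * decide whether the queue was drained, much as napi_rx_handler() does
 * below.
 */
#if 0	/* example only */
static int example_poll_once(struct adapter *adap, struct sge_qset *qs,
			     int quota)
{
	int done = process_responses(adap, qs, quota);

	return done < quota;	/* non-zero if the ring was fully drained */
}
#endif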
1838
1839static inline int is_pure_response(const struct rsp_desc *r)
1840{
1841 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1842
1843 return (n | r->len_cq) == 0;
1844}
1845
1846/**
1847 * napi_rx_handler - the NAPI handler for Rx processing
1848 * @dev: the net device
1849 * @budget: how many packets we can process in this round
1850 *
1851 * Handler for new data events when using NAPI.
1852 */
1853static int napi_rx_handler(struct net_device *dev, int *budget)
1854{
1855 struct adapter *adap = dev->priv;
1856 struct sge_qset *qs = dev2qset(dev);
1857 int effective_budget = min(*budget, dev->quota);
1858
1859 int work_done = process_responses(adap, qs, effective_budget);
1860 *budget -= work_done;
1861 dev->quota -= work_done;
1862
1863 if (work_done >= effective_budget)
1864 return 1;
1865
1866 netif_rx_complete(dev);
1867
1868 /*
 1869 * Because we don't atomically flush the following write, it is
 1870 * possible in very rare cases that it reaches the device in a way
 1871 * that races with a new response being written plus an error interrupt,
 1872 * causing the NAPI interrupt handler below to return unhandled status
 1873 * to the OS. Protecting against this would require flushing the write
 1874 * and doing both the write and the flush with interrupts off; that is
 1875 * far too expensive and unjustifiable given the rarity of the race.
1876 *
1877 * The race cannot happen at all with MSI-X.
1878 */
1879 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1880 V_NEWTIMER(qs->rspq.next_holdoff) |
1881 V_NEWINDEX(qs->rspq.cidx));
1882 return 0;
1883}
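/*
 * Note: the single A_SG_GTS doorbell write above does double duty; it
 * returns the consumed entries to HW (V_NEWINDEX) and programs the holdoff
 * timer for the next interrupt (V_NEWTIMER, in 0.1 us ticks).
 */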
1884
1885/*
1886 * Returns true if the device is already scheduled for polling.
1887 */
1888static inline int napi_is_scheduled(struct net_device *dev)
1889{
1890 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1891}
1892
1893/**
1894 * process_pure_responses - process pure responses from a response queue
1895 * @adap: the adapter
1896 * @qs: the queue set owning the response queue
1897 * @r: the first pure response to process
1898 *
1899 * A simpler version of process_responses() that handles only pure (i.e.,
 1900 * non data-carrying) responses. Such responses are too lightweight to
1901 * justify calling a softirq under NAPI, so we handle them specially in
1902 * the interrupt handler. The function is called with a pointer to a
1903 * response, which the caller must ensure is a valid pure response.
1904 *
1905 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1906 */
1907static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1908 struct rsp_desc *r)
1909{
1910 struct sge_rspq *q = &qs->rspq;
 1911	unsigned int sleeping = 0;
1912
1913 do {
1914 u32 flags = ntohl(r->flags);
1915
1916 r++;
1917 if (unlikely(++q->cidx == q->size)) {
1918 q->cidx = 0;
1919 q->gen ^= 1;
1920 r = q->desc;
1921 }
1922 prefetch(r);
1923
1924 if (flags & RSPD_CTRL_MASK) {
1925 sleeping |= flags & RSPD_GTS_MASK;
 1926			handle_rsp_cntrl_info(qs, flags);
1927 }
1928
1929 q->pure_rsps++;
1930 if (++q->credits >= (q->size / 4)) {
1931 refill_rspq(adap, q, q->credits);
1932 q->credits = 0;
1933 }
1934 } while (is_new_response(r, q) && is_pure_response(r));
1935
1936 if (sleeping)
1937 check_ring_db(adap, qs, sleeping);
1938
1939 smp_mb(); /* commit Tx queue .processed updates */
1940 if (unlikely(qs->txq_stopped != 0))
1941 restart_tx(qs);
1942
1943 return is_new_response(r, q);
1944}
1945
1946/**
1947 * handle_responses - decide what to do with new responses in NAPI mode
1948 * @adap: the adapter
1949 * @q: the response queue
1950 *
1951 * This is used by the NAPI interrupt handlers to decide what to do with
1952 * new SGE responses. If there are no new responses it returns -1. If
1953 * there are new responses and they are pure (i.e., non-data carrying)
1954 * it handles them straight in hard interrupt context as they are very
1955 * cheap and don't deliver any packets. Finally, if there are any data
1956 * signaling responses it schedules the NAPI handler. Returns 1 if it
1957 * schedules NAPI, 0 if all new responses were pure.
1958 *
1959 * The caller must ascertain NAPI is not already running.
1960 */
1961static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
1962{
1963 struct sge_qset *qs = rspq_to_qset(q);
1964 struct rsp_desc *r = &q->desc[q->cidx];
1965
1966 if (!is_new_response(r, q))
1967 return -1;
1968 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
1969 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
1970 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
1971 return 0;
1972 }
1973 if (likely(__netif_rx_schedule_prep(qs->netdev)))
1974 __netif_rx_schedule(qs->netdev);
1975 return 1;
1976}
1977
1978/*
1979 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
1980 * (i.e., response queue serviced in hard interrupt).
1981 */
1982irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
1983{
1984 struct sge_qset *qs = cookie;
1985 struct adapter *adap = qs->netdev->priv;
1986 struct sge_rspq *q = &qs->rspq;
1987
1988 spin_lock(&q->lock);
1989 if (process_responses(adap, qs, -1) == 0)
1990 q->unhandled_irqs++;
1991 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
1992 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
1993 spin_unlock(&q->lock);
1994 return IRQ_HANDLED;
1995}
1996
1997/*
1998 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1999 * (i.e., response queue serviced by NAPI polling).
2000 */
2001irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2002{
2003 struct sge_qset *qs = cookie;
2004 struct adapter *adap = qs->netdev->priv;
2005 struct sge_rspq *q = &qs->rspq;
2006
2007 spin_lock(&q->lock);
2008 BUG_ON(napi_is_scheduled(qs->netdev));
2009
2010 if (handle_responses(adap, q) < 0)
2011 q->unhandled_irqs++;
2012 spin_unlock(&q->lock);
2013 return IRQ_HANDLED;
2014}
2015
2016/*
2017 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2018 * SGE response queues as well as error and other async events as they all use
2019 * the same MSI vector. We use one SGE response queue per port in this mode
2020 * and protect all response queues with queue 0's lock.
2021 */
2022static irqreturn_t t3_intr_msi(int irq, void *cookie)
2023{
2024 int new_packets = 0;
2025 struct adapter *adap = cookie;
2026 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2027
2028 spin_lock(&q->lock);
2029
2030 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2031 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2032 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2033 new_packets = 1;
2034 }
2035
2036 if (adap->params.nports == 2 &&
2037 process_responses(adap, &adap->sge.qs[1], -1)) {
2038 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2039
2040 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2041 V_NEWTIMER(q1->next_holdoff) |
2042 V_NEWINDEX(q1->cidx));
2043 new_packets = 1;
2044 }
2045
2046 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2047 q->unhandled_irqs++;
2048
2049 spin_unlock(&q->lock);
2050 return IRQ_HANDLED;
2051}
2052
2053static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2054{
2055 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2056 if (likely(__netif_rx_schedule_prep(dev)))
2057 __netif_rx_schedule(dev);
2058 return 1;
2059 }
2060 return 0;
2061}
2062
2063/*
2064 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2065 * by NAPI polling). Handles data events from SGE response queues as well as
2066 * error and other async events as they all use the same MSI vector. We use
2067 * one SGE response queue per port in this mode and protect all response
2068 * queues with queue 0's lock.
2069 */
2070irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2071{
2072 int new_packets;
2073 struct adapter *adap = cookie;
2074 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2075
2076 spin_lock(&q->lock);
2077
2078 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2079 if (adap->params.nports == 2)
2080 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2081 &adap->sge.qs[1].rspq);
2082 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2083 q->unhandled_irqs++;
2084
2085 spin_unlock(&q->lock);
2086 return IRQ_HANDLED;
2087}
2088
2089/*
2090 * A helper function that processes responses and issues GTS.
2091 */
2092static inline int process_responses_gts(struct adapter *adap,
2093 struct sge_rspq *rq)
2094{
2095 int work;
2096
2097 work = process_responses(adap, rspq_to_qset(rq), -1);
2098 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2099 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2100 return work;
2101}
2102
2103/*
2104 * The legacy INTx interrupt handler. This needs to handle data events from
2105 * SGE response queues as well as error and other async events as they all use
2106 * the same interrupt pin. We use one SGE response queue per port in this mode
2107 * and protect all response queues with queue 0's lock.
2108 */
2109static irqreturn_t t3_intr(int irq, void *cookie)
2110{
2111 int work_done, w0, w1;
2112 struct adapter *adap = cookie;
2113 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2114 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2115
2116 spin_lock(&q0->lock);
2117
2118 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2119 w1 = adap->params.nports == 2 &&
2120 is_new_response(&q1->desc[q1->cidx], q1);
2121
2122 if (likely(w0 | w1)) {
2123 t3_write_reg(adap, A_PL_CLI, 0);
2124 t3_read_reg(adap, A_PL_CLI); /* flush */
2125
2126 if (likely(w0))
2127 process_responses_gts(adap, q0);
2128
2129 if (w1)
2130 process_responses_gts(adap, q1);
2131
2132 work_done = w0 | w1;
2133 } else
2134 work_done = t3_slow_intr_handler(adap);
2135
2136 spin_unlock(&q0->lock);
2137 return IRQ_RETVAL(work_done != 0);
2138}
2139
2140/*
2141 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2142 * Handles data events from SGE response queues as well as error and other
2143 * async events as they all use the same interrupt pin. We use one SGE
2144 * response queue per port in this mode and protect all response queues with
2145 * queue 0's lock.
2146 */
2147static irqreturn_t t3b_intr(int irq, void *cookie)
2148{
2149 u32 map;
2150 struct adapter *adap = cookie;
2151 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2152
2153 t3_write_reg(adap, A_PL_CLI, 0);
2154 map = t3_read_reg(adap, A_SG_DATA_INTR);
2155
2156 if (unlikely(!map)) /* shared interrupt, most likely */
2157 return IRQ_NONE;
2158
2159 spin_lock(&q0->lock);
2160
2161 if (unlikely(map & F_ERRINTR))
2162 t3_slow_intr_handler(adap);
2163
2164 if (likely(map & 1))
2165 process_responses_gts(adap, q0);
2166
2167 if (map & 2)
2168 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2169
2170 spin_unlock(&q0->lock);
2171 return IRQ_HANDLED;
2172}
2173
2174/*
2175 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2176 * Handles data events from SGE response queues as well as error and other
2177 * async events as they all use the same interrupt pin. We use one SGE
2178 * response queue per port in this mode and protect all response queues with
2179 * queue 0's lock.
2180 */
2181static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2182{
2183 u32 map;
2184 struct net_device *dev;
2185 struct adapter *adap = cookie;
2186 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2187
2188 t3_write_reg(adap, A_PL_CLI, 0);
2189 map = t3_read_reg(adap, A_SG_DATA_INTR);
2190
2191 if (unlikely(!map)) /* shared interrupt, most likely */
2192 return IRQ_NONE;
2193
2194 spin_lock(&q0->lock);
2195
2196 if (unlikely(map & F_ERRINTR))
2197 t3_slow_intr_handler(adap);
2198
2199 if (likely(map & 1)) {
2200 dev = adap->sge.qs[0].netdev;
2201
2202 BUG_ON(napi_is_scheduled(dev));
2203 if (likely(__netif_rx_schedule_prep(dev)))
2204 __netif_rx_schedule(dev);
2205 }
2206 if (map & 2) {
2207 dev = adap->sge.qs[1].netdev;
2208
2209 BUG_ON(napi_is_scheduled(dev));
2210 if (likely(__netif_rx_schedule_prep(dev)))
2211 __netif_rx_schedule(dev);
2212 }
2213
2214 spin_unlock(&q0->lock);
2215 return IRQ_HANDLED;
2216}
2217
2218/**
2219 * t3_intr_handler - select the top-level interrupt handler
2220 * @adap: the adapter
2221 * @polling: whether using NAPI to service response queues
2222 *
2223 * Selects the top-level interrupt handler based on the type of interrupts
2224 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2225 * response queues.
2226 */
2227intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2228{
2229 if (adap->flags & USING_MSIX)
2230 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2231 if (adap->flags & USING_MSI)
2232 return polling ? t3_intr_msi_napi : t3_intr_msi;
2233 if (adap->params.rev > 0)
2234 return polling ? t3b_intr_napi : t3b_intr;
2235 return t3_intr;
2236}
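/*
 * Registration sketch (example only) for the single-vector MSI/INTx case,
 * where the adapter itself is the cookie; MSI-X instead registers one
 * vector per queue set with the qset as cookie. The actual hook-up lives
 * in the driver's main module; the IRQ flags and name string below are
 * illustrative assumptions.
 */
#if 0	/* example only */
	int err = request_irq(adap->pdev->irq,
			      t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
			      (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
			      "cxgb3", adap);
#endif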
2237
2238/**
2239 * t3_sge_err_intr_handler - SGE async event interrupt handler
2240 * @adapter: the adapter
2241 *
2242 * Interrupt handler for SGE asynchronous (non-data) events.
2243 */
2244void t3_sge_err_intr_handler(struct adapter *adapter)
2245{
2246 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2247
2248 if (status & F_RSPQCREDITOVERFOW)
2249 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2250
2251 if (status & F_RSPQDISABLED) {
2252 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2253
2254 CH_ALERT(adapter,
2255 "packet delivered to disabled response queue "
2256 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2257 }
2258
2259 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2260 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2261 t3_fatal_err(adapter);
2262}
2263
2264/**
2265 * sge_timer_cb - perform periodic maintenance of an SGE qset
2266 * @data: the SGE queue set to maintain
2267 *
2268 * Runs periodically from a timer to perform maintenance of an SGE queue
2269 * set. It performs two tasks:
2270 *
2271 * a) Cleans up any completed Tx descriptors that may still be pending.
2272 * Normal descriptor cleanup happens when new packets are added to a Tx
2273 * queue so this timer is relatively infrequent and does any cleanup only
2274 * if the Tx queue has not seen any new packets in a while. We make a
2275 * best effort attempt to reclaim descriptors, in that we don't wait
2276 * around if we cannot get a queue's lock (which most likely is because
 2277 * someone else is queueing new packets and so will also handle the
 2278 * cleanup). Since control queues use immediate data exclusively we don't
2279 * bother cleaning them up here.
2280 *
2281 * b) Replenishes Rx queues that have run out due to memory shortage.
2282 * Normally new Rx buffers are added when existing ones are consumed but
2283 * when out of memory a queue can become empty. We try to add only a few
2284 * buffers here, the queue will be replenished fully as these new buffers
2285 * are used up if memory shortage has subsided.
2286 */
2287static void sge_timer_cb(unsigned long data)
2288{
2289 spinlock_t *lock;
2290 struct sge_qset *qs = (struct sge_qset *)data;
2291 struct adapter *adap = qs->netdev->priv;
2292
2293 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2294 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2295 spin_unlock(&qs->txq[TXQ_ETH].lock);
2296 }
2297 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2298 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2299 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2300 }
2301 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2302 &adap->sge.qs[0].rspq.lock;
2303 if (spin_trylock_irq(lock)) {
2304 if (!napi_is_scheduled(qs->netdev)) {
2305 if (qs->fl[0].credits < qs->fl[0].size)
2306 __refill_fl(adap, &qs->fl[0]);
2307 if (qs->fl[1].credits < qs->fl[1].size)
2308 __refill_fl(adap, &qs->fl[1]);
2309 }
2310 spin_unlock_irq(lock);
2311 }
2312 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2313}
2314
2315/**
2316 * t3_update_qset_coalesce - update coalescing settings for a queue set
2317 * @qs: the SGE queue set
2318 * @p: new queue set parameters
2319 *
2320 * Update the coalescing settings for an SGE queue set. Nothing is done
2321 * if the queue set is not initialized yet.
2322 */
2323void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2324{
2325 if (!qs->netdev)
2326 return;
2327
 2328	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2329 qs->rspq.polling = p->polling;
2330 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2331}
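/*
 * Worked example: the holdoff timer field is in 0.1 us ticks, so a
 * coalesce_usecs of 5 programs 5 * 10 = 50 ticks, and a setting of 0 is
 * clamped to 1 tick (0.1 us) because the HW field cannot be zero.
 */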
2332
2333/**
2334 * t3_sge_alloc_qset - initialize an SGE queue set
2335 * @adapter: the adapter
2336 * @id: the queue set id
2337 * @nports: how many Ethernet ports will be using this queue set
2338 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2339 * @p: configuration parameters for this queue set
2340 * @ntxq: number of Tx queues for the queue set
2341 * @netdev: net device associated with this queue set
2342 *
2343 * Allocate resources and initialize an SGE queue set. A queue set
2344 * comprises a response queue, two Rx free-buffer queues, and up to 3
2345 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2346 * queue, offload queue, and control queue.
2347 */
2348int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2349 int irq_vec_idx, const struct qset_params *p,
2350 int ntxq, struct net_device *netdev)
2351{
2352 int i, ret = -ENOMEM;
2353 struct sge_qset *q = &adapter->sge.qs[id];
2354
2355 init_qset_cntxt(q, id);
2356 init_timer(&q->tx_reclaim_timer);
2357 q->tx_reclaim_timer.data = (unsigned long)q;
2358 q->tx_reclaim_timer.function = sge_timer_cb;
2359
2360 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2361 sizeof(struct rx_desc),
2362 sizeof(struct rx_sw_desc),
2363 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2364 if (!q->fl[0].desc)
2365 goto err;
2366
2367 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2368 sizeof(struct rx_desc),
2369 sizeof(struct rx_sw_desc),
2370 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2371 if (!q->fl[1].desc)
2372 goto err;
2373
2374 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2375 sizeof(struct rsp_desc), 0,
2376 &q->rspq.phys_addr, NULL);
2377 if (!q->rspq.desc)
2378 goto err;
2379
2380 for (i = 0; i < ntxq; ++i) {
2381 /*
2382 * The control queue always uses immediate data so does not
2383 * need to keep track of any sk_buffs.
2384 */
2385 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2386
2387 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2388 sizeof(struct tx_desc), sz,
2389 &q->txq[i].phys_addr,
2390 &q->txq[i].sdesc);
2391 if (!q->txq[i].desc)
2392 goto err;
2393
2394 q->txq[i].gen = 1;
2395 q->txq[i].size = p->txq_size[i];
2396 spin_lock_init(&q->txq[i].lock);
2397 skb_queue_head_init(&q->txq[i].sendq);
2398 }
2399
2400 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2401 (unsigned long)q);
2402 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2403 (unsigned long)q);
2404
2405 q->fl[0].gen = q->fl[1].gen = 1;
2406 q->fl[0].size = p->fl_size;
2407 q->fl[1].size = p->jumbo_size;
2408
2409 q->rspq.gen = 1;
2410 q->rspq.size = p->rspq_size;
2411 spin_lock_init(&q->rspq.lock);
2412
2413 q->txq[TXQ_ETH].stop_thres = nports *
2414 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2415
2416 if (ntxq == 1) {
2417 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2418 sizeof(struct cpl_rx_pkt);
2419 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2420 sizeof(struct cpl_rx_pkt);
2421 } else {
2422 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2423 sizeof(struct cpl_rx_data);
2424 q->fl[1].buf_size = (16 * 1024) -
2425 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2426 }
2427
2428 spin_lock(&adapter->sge.reg_lock);
2429
2430 /* FL threshold comparison uses < */
2431 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2432 q->rspq.phys_addr, q->rspq.size,
2433 q->fl[0].buf_size, 1, 0);
2434 if (ret)
2435 goto err_unlock;
2436
2437 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2438 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2439 q->fl[i].phys_addr, q->fl[i].size,
2440 q->fl[i].buf_size, p->cong_thres, 1,
2441 0);
2442 if (ret)
2443 goto err_unlock;
2444 }
2445
2446 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2447 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2448 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2449 1, 0);
2450 if (ret)
2451 goto err_unlock;
2452
2453 if (ntxq > 1) {
2454 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2455 USE_GTS, SGE_CNTXT_OFLD, id,
2456 q->txq[TXQ_OFLD].phys_addr,
2457 q->txq[TXQ_OFLD].size, 0, 1, 0);
2458 if (ret)
2459 goto err_unlock;
2460 }
2461
2462 if (ntxq > 2) {
2463 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2464 SGE_CNTXT_CTRL, id,
2465 q->txq[TXQ_CTRL].phys_addr,
2466 q->txq[TXQ_CTRL].size,
2467 q->txq[TXQ_CTRL].token, 1, 0);
2468 if (ret)
2469 goto err_unlock;
2470 }
2471
2472 spin_unlock(&adapter->sge.reg_lock);
2473 q->netdev = netdev;
2474 t3_update_qset_coalesce(q, p);
2475
2476 /*
2477 * We use atalk_ptr as a backpointer to a qset. In case a device is
2478 * associated with multiple queue sets only the first one sets
2479 * atalk_ptr.
2480 */
2481 if (netdev->atalk_ptr == NULL)
2482 netdev->atalk_ptr = q;
2483
2484 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2485 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2486 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2487
2488 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2489 V_NEWTIMER(q->rspq.holdoff_tmr));
2490
2491 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2492 return 0;
2493
2494 err_unlock:
2495 spin_unlock(&adapter->sge.reg_lock);
2496 err:
2497 t3_free_qset(adapter, q);
2498 return ret;
2499}
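/*
 * Call sketch (example only): a port-open path could bring up queue set 0
 * for a single port roughly like this. The qset_params source matches what
 * t3_sge_prep() fills in below, assuming the adapter keeps its sge_params
 * at adap->params.sge; the qset id, vector index, ntxq value, and error
 * handling are illustrative assumptions.
 */
#if 0	/* example only */
	int err = t3_sge_alloc_qset(adap, 0 /* id */, 1 /* nports */,
				    0 /* irq_vec_idx */,
				    &adap->params.sge.qset[0],
				    SGE_TXQ_PER_SET, dev);
	if (err)
		goto err_out;
#endif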
2500
2501/**
2502 * t3_free_sge_resources - free SGE resources
2503 * @adap: the adapter
2504 *
2505 * Frees resources used by the SGE queue sets.
2506 */
2507void t3_free_sge_resources(struct adapter *adap)
2508{
2509 int i;
2510
2511 for (i = 0; i < SGE_QSETS; ++i)
2512 t3_free_qset(adap, &adap->sge.qs[i]);
2513}
2514
2515/**
2516 * t3_sge_start - enable SGE
2517 * @adap: the adapter
2518 *
2519 * Enables the SGE for DMAs. This is the last step in starting packet
2520 * transfers.
2521 */
2522void t3_sge_start(struct adapter *adap)
2523{
2524 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2525}
2526
2527/**
2528 * t3_sge_stop - disable SGE operation
2529 * @adap: the adapter
2530 *
 2531 * Disables the DMA engine. This can be called in emergencies (e.g.,
2532 * from error interrupts) or from normal process context. In the latter
2533 * case it also disables any pending queue restart tasklets. Note that
2534 * if it is called in interrupt context it cannot disable the restart
2535 * tasklets as it cannot wait, however the tasklets will have no effect
2536 * since the doorbells are disabled and the driver will call this again
2537 * later from process context, at which time the tasklets will be stopped
2538 * if they are still running.
2539 */
2540void t3_sge_stop(struct adapter *adap)
2541{
2542 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2543 if (!in_interrupt()) {
2544 int i;
2545
2546 for (i = 0; i < SGE_QSETS; ++i) {
2547 struct sge_qset *qs = &adap->sge.qs[i];
2548
2549 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2550 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2551 }
2552 }
2553}
2554
2555/**
2556 * t3_sge_init - initialize SGE
2557 * @adap: the adapter
2558 * @p: the SGE parameters
2559 *
2560 * Performs SGE initialization needed every time after a chip reset.
2561 * We do not initialize any of the queue sets here, instead the driver
2562 * top-level must request those individually. We also do not enable DMA
2563 * here, that should be done after the queues have been set up.
2564 */
2565void t3_sge_init(struct adapter *adap, struct sge_params *p)
2566{
2567 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2568
2569 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2570 F_CQCRDTCTRL |
2571 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2572 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2573#if SGE_NUM_GENBITS == 1
2574 ctrl |= F_EGRGENCTRL;
2575#endif
2576 if (adap->params.rev > 0) {
2577 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2578 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2579 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2580 }
2581 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2582 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2583 V_LORCQDRBTHRSH(512));
2584 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2585 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
 2586		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2587 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2588 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2589 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2590 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2591 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2592 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2593}
2594
2595/**
2596 * t3_sge_prep - one-time SGE initialization
2597 * @adap: the associated adapter
2598 * @p: SGE parameters
2599 *
2600 * Performs one-time initialization of SGE SW state. Includes determining
2601 * defaults for the assorted SGE parameters, which admins can change until
2602 * they are used to initialize the SGE.
2603 */
2604void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2605{
2606 int i;
2607
2608 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2609 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2610
2611 for (i = 0; i < SGE_QSETS; ++i) {
2612 struct qset_params *q = p->qset + i;
2613
2614 q->polling = adap->params.rev > 0;
2615 q->coalesce_usecs = 5;
2616 q->rspq_size = 1024;
2617 q->fl_size = 4096;
2618 q->jumbo_size = 512;
2619 q->txq_size[TXQ_ETH] = 1024;
2620 q->txq_size[TXQ_OFLD] = 1024;
2621 q->txq_size[TXQ_CTRL] = 256;
2622 q->cong_thres = 0;
2623 }
2624
2625 spin_lock_init(&adap->sge.reg_lock);
2626}
2627
2628/**
2629 * t3_get_desc - dump an SGE descriptor for debugging purposes
2630 * @qs: the queue set
2631 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2632 * @idx: the descriptor index in the queue
2633 * @data: where to dump the descriptor contents
2634 *
2635 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2636 * size of the descriptor.
2637 */
2638int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2639 unsigned char *data)
2640{
2641 if (qnum >= 6)
2642 return -EINVAL;
2643
2644 if (qnum < 3) {
2645 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2646 return -EINVAL;
2647 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2648 return sizeof(struct tx_desc);
2649 }
2650
2651 if (qnum == 3) {
2652 if (!qs->rspq.desc || idx >= qs->rspq.size)
2653 return -EINVAL;
2654 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2655 return sizeof(struct rsp_desc);
2656 }
2657
2658 qnum -= 4;
2659 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2660 return -EINVAL;
2661 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2662 return sizeof(struct rx_desc);
2663}
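/*
 * Usage sketch (example only): a debug dump could size its buffer for the
 * Tx descriptor, assuming TX_DESC_FLITS makes struct tx_desc at least as
 * large as the other descriptor types, and copy one entry out; idx is a
 * hypothetical caller-chosen index.
 */
#if 0	/* example only */
	unsigned char buf[sizeof(struct tx_desc)];
	int len = t3_get_desc(qs, TXQ_ETH, idx, buf);

	/* on success, len is the descriptor size and buf holds its raw bytes */
#endif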