Commit | Line | Data |
---|---|---|
fd3a4790 DM |
1 | /* |
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. | |
3 | * | |
ce100b8b | 4 | * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. |
fd3a4790 DM |
5 | * |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | */ | |
34 | ||
35 | #include <linux/skbuff.h> | |
36 | #include <linux/netdevice.h> | |
37 | #include <linux/etherdevice.h> | |
38 | #include <linux/if_vlan.h> | |
39 | #include <linux/ip.h> | |
40 | #include <linux/dma-mapping.h> | |
41 | #include <linux/jiffies.h> | |
70c71606 | 42 | #include <linux/prefetch.h> |
ee40fa06 | 43 | #include <linux/export.h> |
fd3a4790 DM |
44 | #include <net/ipv6.h> |
45 | #include <net/tcp.h> | |
3a336cb1 HS |
46 | #ifdef CONFIG_NET_RX_BUSY_POLL |
47 | #include <net/busy_poll.h> | |
48 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | |
84a200b3 VP |
49 | #ifdef CONFIG_CHELSIO_T4_FCOE |
50 | #include <scsi/fc/fc_fcoe.h> | |
51 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
fd3a4790 DM |
52 | #include "cxgb4.h" |
53 | #include "t4_regs.h" | |
f612b815 | 54 | #include "t4_values.h" |
fd3a4790 DM |
55 | #include "t4_msg.h" |
56 | #include "t4fw_api.h" | |
57 | ||
58 | /* | |
59 | * Rx buffer size. We use largish buffers if possible but settle for single | |
60 | * pages under memory shortage. | |
61 | */ | |
62 | #if PAGE_SHIFT >= 16 | |
63 | # define FL_PG_ORDER 0 | |
64 | #else | |
65 | # define FL_PG_ORDER (16 - PAGE_SHIFT) | |
66 | #endif | |
67 | ||
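/*
 * Illustrative sketch (not part of the driver): how FL_PG_ORDER maps to the
 * size of the "large" Free List buffer.  PAGE_SHIFT = 12 below is an assumed
 * example value for a 4KB-page system; the driver uses the kernel's real
 * PAGE_SHIFT/PAGE_SIZE.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;                 /* assumed: 4KB pages */
	unsigned int fl_pg_order = page_shift >= 16 ? 0 : 16 - page_shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long large_buf = page_size << fl_pg_order;

	/* With 4KB pages: order 4, so the large FL buffer is 64KB. */
	printf("FL_PG_ORDER=%u -> large FL buffer = %lu bytes\n",
	       fl_pg_order, large_buf);
	return 0;
}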
68 | /* RX_PULL_LEN should be <= RX_COPY_THRES */ | |
69 | #define RX_COPY_THRES 256 | |
70 | #define RX_PULL_LEN 128 | |
71 | ||
72 | /* | |
73 | * Main body length for sk_buffs used for Rx Ethernet packets with fragments. | |
74 | * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. | |
75 | */ | |
76 | #define RX_PKT_SKB_LEN 512 | |
77 | ||
fd3a4790 DM |
78 | /* |
79 | * Max number of Tx descriptors we clean up at a time. Should be modest as | |
80 | * freeing skbs isn't cheap and it happens while holding locks. We just need | |
81 | * to free packets faster than they arrive; we eventually catch up and keep | |
82 | * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. | |
83 | */ | |
84 | #define MAX_TX_RECLAIM 16 | |
85 | ||
86 | /* | |
87 | * Max number of Rx buffers we replenish at a time. Again keep this modest, | |
88 | * allocating buffers isn't cheap either. | |
89 | */ | |
90 | #define MAX_RX_REFILL 16U | |
91 | ||
92 | /* | |
93 | * Period of the Rx queue check timer. This timer runs infrequently as it | |
94 | * has work to do only when the system experiences a severe memory shortage. | |
95 | */ | |
96 | #define RX_QCHECK_PERIOD (HZ / 2) | |
97 | ||
98 | /* | |
99 | * Period of the Tx queue check timer. | |
100 | */ | |
101 | #define TX_QCHECK_PERIOD (HZ / 2) | |
102 | ||
103 | /* | |
104 | * Max number of Tx descriptors to be reclaimed by the Tx timer. | |
105 | */ | |
106 | #define MAX_TIMER_TX_RECLAIM 100 | |
107 | ||
108 | /* | |
109 | * Timer index used when backing off due to memory shortage. | |
110 | */ | |
111 | #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) | |
112 | ||
fd3a4790 DM |
113 | /* |
114 | * Suspend an Ethernet Tx queue with fewer available descriptors than this. | |
115 | * This is the same as calc_tx_descs() for a TSO packet with | |
116 | * nr_frags == MAX_SKB_FRAGS. | |
117 | */ | |
118 | #define ETHTXQ_STOP_THRES \ | |
119 | (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) | |
120 | ||
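/*
 * Illustrative sketch (not part of the driver): evaluating ETHTXQ_STOP_THRES
 * for an assumed MAX_SKB_FRAGS of 17 (an example value; the real constant
 * comes from the kernel).  The inner expression is the SGL flit count for
 * MAX_SKB_FRAGS entries, and dividing by 8 converts flits to descriptors.
 */
#include <stdio.h>

#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_skb_frags = 17;	/* assumed example value */
	unsigned int sgl_flits = (3 * max_skb_frags) / 2 + (max_skb_frags & 1);
	unsigned int stop_thres = 1 + EX_DIV_ROUND_UP(sgl_flits, 8);

	/* 17 frags -> 26 SGL flits -> 4 descriptors, +1 -> threshold of 5. */
	printf("stop threshold = %u descriptors\n", stop_thres);
	return 0;
}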
121 | /* | |
122 | * Suspension threshold for non-Ethernet Tx queues. We require enough room | |
123 | * for a full sized WR. | |
124 | */ | |
125 | #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) | |
126 | ||
127 | /* | |
128 | * Max Tx descriptor space we allow for an Ethernet packet to be inlined | |
129 | * into a WR. | |
130 | */ | |
21dcfad6 | 131 | #define MAX_IMM_TX_PKT_LEN 256 |
fd3a4790 DM |
132 | |
133 | /* | |
134 | * Max size of a WR sent through a control Tx queue. | |
135 | */ | |
136 | #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN | |
137 | ||
fd3a4790 DM |
138 | struct tx_sw_desc { /* SW state per Tx descriptor */ |
139 | struct sk_buff *skb; | |
140 | struct ulptx_sgl *sgl; | |
141 | }; | |
142 | ||
143 | struct rx_sw_desc { /* SW state per Rx descriptor */ | |
144 | struct page *page; | |
145 | dma_addr_t dma_addr; | |
146 | }; | |
147 | ||
148 | /* | |
52367a76 VP |
149 | * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb |
150 | * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs. | |
151 | * We could easily support more but there doesn't seem to be much need for | |
152 | * that ... | |
153 | */ | |
154 | #define FL_MTU_SMALL 1500 | |
155 | #define FL_MTU_LARGE 9000 | |
156 | ||
157 | static inline unsigned int fl_mtu_bufsize(struct adapter *adapter, | |
158 | unsigned int mtu) | |
159 | { | |
160 | struct sge *s = &adapter->sge; | |
161 | ||
162 | return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); | |
163 | } | |
164 | ||
165 | #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL) | |
166 | #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE) | |
167 | ||
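/*
 * Illustrative sketch (not part of the driver): the fl_mtu_bufsize()
 * arithmetic with assumed example values -- pktshift = 2, fl_align = 64.
 * ETH_HLEN = 14 and VLAN_HLEN = 4 are the usual kernel constants; the real
 * pktshift and fl_align come from the adapter's SGE configuration.
 */
#include <stdio.h>

#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned int pktshift = 2, fl_align = 64;	/* assumed */
	unsigned int eth_hlen = 14, vlan_hlen = 4;
	unsigned int mtu = 1500;

	unsigned long bufsize = EX_ALIGN(pktshift + eth_hlen + vlan_hlen + mtu,
					 fl_align);

	/* 2 + 14 + 4 + 1500 = 1520, rounded up to 64 bytes -> 1536. */
	printf("small-MTU FL buffer size = %lu bytes\n", bufsize);
	return 0;
}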
168 | /* | |
169 | * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses | |
170 | * these to specify the buffer size as an index into the SGE Free List Buffer | |
171 | * Size register array. We also use bit 4 to mark when the buffer has been | |
172 | * unmapped for DMA, but this is of course never sent to the hardware and is | |
173 | * only used to prevent double unmappings. All of the above requires that the | |
174 | * Free List Buffers which we allocate have the bottom 5 bits free (0) -- i.e. | |
175 | * are 32-byte aligned or a power of 2 greater in alignment. Since the SGE's | |
176 | * minimal Free List Buffer alignment is 32 bytes, this works out for us ... | |
fd3a4790 DM |
177 | */ |
178 | enum { | |
52367a76 VP |
179 | RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */ |
180 | RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */ | |
181 | RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */ | |
182 | ||
183 | /* | |
184 | * XXX We shouldn't depend on being able to use these indices. | |
185 | * XXX Especially when some other Master PF has initialized the | |
186 | * XXX adapter or we use the Firmware Configuration File. We | |
187 | * XXX should really search through the Host Buffer Size register | |
188 | * XXX array for the appropriately sized buffer indices. | |
189 | */ | |
190 | RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */ | |
191 | RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */ | |
192 | ||
193 | RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */ | |
194 | RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ | |
fd3a4790 DM |
195 | }; |
196 | ||
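/*
 * Illustrative sketch (not part of the driver): packing a Free List buffer
 * size index into the low bits of a suitably aligned DMA address and
 * recovering both pieces later, mirroring what the driver does with
 * rx_sw_desc.dma_addr (see get_buf_addr()/is_buf_mapped() below).  The
 * address used here is an arbitrary example.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_RX_BUF_FLAGS		0x1f	/* bottom five bits are special */
#define EX_RX_BUF_SIZE		0x0f	/* bits 0..3: buffer size index */
#define EX_RX_UNMAPPED_BUF	0x10	/* bit 4: buffer is not DMA-mapped */

int main(void)
{
	uint64_t dma_addr = 0x12345000;	/* assumed: at least 32-byte aligned */
	unsigned int size_idx = 0x1;	/* e.g. the "large page" index */

	uint64_t stored = dma_addr | size_idx;	/* what the SW descriptor keeps */

	printf("bus address: 0x%llx\n",
	       (unsigned long long)(stored & ~(uint64_t)EX_RX_BUF_FLAGS));
	printf("size index : %u\n", (unsigned int)(stored & EX_RX_BUF_SIZE));
	printf("mapped     : %s\n",
	       (stored & EX_RX_UNMAPPED_BUF) ? "no" : "yes");
	return 0;
}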
e553ec3f HS |
197 | static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5}; |
198 | #define MIN_NAPI_WORK 1 | |
199 | ||
fd3a4790 DM |
200 | static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) |
201 | { | |
52367a76 | 202 | return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; |
fd3a4790 DM |
203 | } |
204 | ||
205 | static inline bool is_buf_mapped(const struct rx_sw_desc *d) | |
206 | { | |
207 | return !(d->dma_addr & RX_UNMAPPED_BUF); | |
208 | } | |
209 | ||
210 | /** | |
211 | * txq_avail - return the number of available slots in a Tx queue | |
212 | * @q: the Tx queue | |
213 | * | |
214 | * Returns the number of descriptors in a Tx queue available to write new | |
215 | * packets. | |
216 | */ | |
217 | static inline unsigned int txq_avail(const struct sge_txq *q) | |
218 | { | |
219 | return q->size - 1 - q->in_use; | |
220 | } | |
221 | ||
222 | /** | |
223 | * fl_cap - return the capacity of a free-buffer list | |
224 | * @fl: the FL | |
225 | * | |
226 | * Returns the capacity of a free-buffer list. The capacity is less than | |
227 | * the size because one descriptor needs to be left unpopulated; otherwise | |
228 | * the HW will think the FL is empty. | |
229 | */ | |
230 | static inline unsigned int fl_cap(const struct sge_fl *fl) | |
231 | { | |
232 | return fl->size - 8; /* 1 descriptor = 8 buffers */ | |
233 | } | |
234 | ||
c098b026 HS |
235 | /** |
236 | * fl_starving - return whether a Free List is starving. | |
237 | * @adapter: pointer to the adapter | |
238 | * @fl: the Free List | |
239 | * | |
240 | * Tests specified Free List to see whether the number of buffers | |
241 | * available to the hardware has fallen below our "starvation" | |
242 | * threshold. | |
243 | */ | |
244 | static inline bool fl_starving(const struct adapter *adapter, | |
245 | const struct sge_fl *fl) | |
fd3a4790 | 246 | { |
c098b026 HS |
247 | const struct sge *s = &adapter->sge; |
248 | ||
249 | return fl->avail - fl->pend_cred <= s->fl_starve_thres; | |
fd3a4790 DM |
250 | } |
251 | ||
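/*
 * Illustrative sketch (not part of the driver): the fl_starving() test with
 * assumed example numbers.  Only buffers the hardware has actually been told
 * about (avail minus the not-yet-credited pend_cred) count against the
 * starvation threshold.
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned int avail = 12;		/* buffers allocated to the FL */
	unsigned int pend_cred = 6;		/* not yet pushed via the doorbell */
	unsigned int fl_starve_thres = 8;	/* assumed threshold */

	bool starving = (avail - pend_cred) <= fl_starve_thres;

	/* 12 - 6 = 6 <= 8, so this FL would be marked starving. */
	printf("starving: %s\n", starving ? "yes" : "no");
	return 0;
}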
252 | static int map_skb(struct device *dev, const struct sk_buff *skb, | |
253 | dma_addr_t *addr) | |
254 | { | |
255 | const skb_frag_t *fp, *end; | |
256 | const struct skb_shared_info *si; | |
257 | ||
258 | *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); | |
259 | if (dma_mapping_error(dev, *addr)) | |
260 | goto out_err; | |
261 | ||
262 | si = skb_shinfo(skb); | |
263 | end = &si->frags[si->nr_frags]; | |
264 | ||
265 | for (fp = si->frags; fp < end; fp++) { | |
e91b0f24 IC |
266 | *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp), |
267 | DMA_TO_DEVICE); | |
fd3a4790 DM |
268 | if (dma_mapping_error(dev, *addr)) |
269 | goto unwind; | |
270 | } | |
271 | return 0; | |
272 | ||
273 | unwind: | |
274 | while (fp-- > si->frags) | |
9e903e08 | 275 | dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); |
fd3a4790 DM |
276 | |
277 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); | |
278 | out_err: | |
279 | return -ENOMEM; | |
280 | } | |
281 | ||
282 | #ifdef CONFIG_NEED_DMA_MAP_STATE | |
283 | static void unmap_skb(struct device *dev, const struct sk_buff *skb, | |
284 | const dma_addr_t *addr) | |
285 | { | |
286 | const skb_frag_t *fp, *end; | |
287 | const struct skb_shared_info *si; | |
288 | ||
289 | dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); | |
290 | ||
291 | si = skb_shinfo(skb); | |
292 | end = &si->frags[si->nr_frags]; | |
293 | for (fp = si->frags; fp < end; fp++) | |
9e903e08 | 294 | dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE); |
fd3a4790 DM |
295 | } |
296 | ||
297 | /** | |
298 | * deferred_unmap_destructor - unmap a packet when it is freed | |
299 | * @skb: the packet | |
300 | * | |
301 | * This is the packet destructor used for Tx packets that need to remain | |
302 | * mapped until they are freed rather than until their Tx descriptors are | |
303 | * freed. | |
304 | */ | |
305 | static void deferred_unmap_destructor(struct sk_buff *skb) | |
306 | { | |
307 | unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); | |
308 | } | |
309 | #endif | |
310 | ||
311 | static void unmap_sgl(struct device *dev, const struct sk_buff *skb, | |
312 | const struct ulptx_sgl *sgl, const struct sge_txq *q) | |
313 | { | |
314 | const struct ulptx_sge_pair *p; | |
315 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; | |
316 | ||
317 | if (likely(skb_headlen(skb))) | |
318 | dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), | |
319 | DMA_TO_DEVICE); | |
320 | else { | |
321 | dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), | |
322 | DMA_TO_DEVICE); | |
323 | nfrags--; | |
324 | } | |
325 | ||
326 | /* | |
327 | * the complexity below is because of the possibility of a wrap-around | |
328 | * in the middle of an SGL | |
329 | */ | |
330 | for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { | |
331 | if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { | |
332 | unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), | |
333 | ntohl(p->len[0]), DMA_TO_DEVICE); | |
334 | dma_unmap_page(dev, be64_to_cpu(p->addr[1]), | |
335 | ntohl(p->len[1]), DMA_TO_DEVICE); | |
336 | p++; | |
337 | } else if ((u8 *)p == (u8 *)q->stat) { | |
338 | p = (const struct ulptx_sge_pair *)q->desc; | |
339 | goto unmap; | |
340 | } else if ((u8 *)p + 8 == (u8 *)q->stat) { | |
341 | const __be64 *addr = (const __be64 *)q->desc; | |
342 | ||
343 | dma_unmap_page(dev, be64_to_cpu(addr[0]), | |
344 | ntohl(p->len[0]), DMA_TO_DEVICE); | |
345 | dma_unmap_page(dev, be64_to_cpu(addr[1]), | |
346 | ntohl(p->len[1]), DMA_TO_DEVICE); | |
347 | p = (const struct ulptx_sge_pair *)&addr[2]; | |
348 | } else { | |
349 | const __be64 *addr = (const __be64 *)q->desc; | |
350 | ||
351 | dma_unmap_page(dev, be64_to_cpu(p->addr[0]), | |
352 | ntohl(p->len[0]), DMA_TO_DEVICE); | |
353 | dma_unmap_page(dev, be64_to_cpu(addr[0]), | |
354 | ntohl(p->len[1]), DMA_TO_DEVICE); | |
355 | p = (const struct ulptx_sge_pair *)&addr[1]; | |
356 | } | |
357 | } | |
358 | if (nfrags) { | |
359 | __be64 addr; | |
360 | ||
361 | if ((u8 *)p == (u8 *)q->stat) | |
362 | p = (const struct ulptx_sge_pair *)q->desc; | |
363 | addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] : | |
364 | *(const __be64 *)q->desc; | |
365 | dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), | |
366 | DMA_TO_DEVICE); | |
367 | } | |
368 | } | |
369 | ||
370 | /** | |
371 | * free_tx_desc - reclaims Tx descriptors and their buffers | |
372 | * @adapter: the adapter | |
373 | * @q: the Tx queue to reclaim descriptors from | |
374 | * @n: the number of descriptors to reclaim | |
375 | * @unmap: whether the buffers should be unmapped for DMA | |
376 | * | |
377 | * Reclaims Tx descriptors from an SGE Tx queue and frees the associated | |
378 | * Tx buffers. Called with the Tx queue lock held. | |
379 | */ | |
380 | static void free_tx_desc(struct adapter *adap, struct sge_txq *q, | |
381 | unsigned int n, bool unmap) | |
382 | { | |
383 | struct tx_sw_desc *d; | |
384 | unsigned int cidx = q->cidx; | |
385 | struct device *dev = adap->pdev_dev; | |
386 | ||
387 | d = &q->sdesc[cidx]; | |
388 | while (n--) { | |
389 | if (d->skb) { /* an SGL is present */ | |
390 | if (unmap) | |
391 | unmap_sgl(dev, d->skb, d->sgl, q); | |
a7525198 | 392 | dev_consume_skb_any(d->skb); |
fd3a4790 DM |
393 | d->skb = NULL; |
394 | } | |
395 | ++d; | |
396 | if (++cidx == q->size) { | |
397 | cidx = 0; | |
398 | d = q->sdesc; | |
399 | } | |
400 | } | |
401 | q->cidx = cidx; | |
402 | } | |
403 | ||
404 | /* | |
405 | * Return the number of reclaimable descriptors in a Tx queue. | |
406 | */ | |
407 | static inline int reclaimable(const struct sge_txq *q) | |
408 | { | |
409 | int hw_cidx = ntohs(q->stat->cidx); | |
410 | hw_cidx -= q->cidx; | |
411 | return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; | |
412 | } | |
413 | ||
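/*
 * Illustrative sketch (not part of the driver): the modular arithmetic that
 * reclaimable() uses to count descriptors between the driver's consumer
 * index and the hardware's reported consumer index on a ring that wraps.
 * Ring size and indices below are arbitrary example values.
 */
#include <stdio.h>

static int ring_distance(int hw_cidx, int sw_cidx, int size)
{
	int d = hw_cidx - sw_cidx;

	return d < 0 ? d + size : d;	/* wrapped past the end of the ring */
}

int main(void)
{
	int size = 1024;

	/* No wrap: hardware is ahead of software by 10 descriptors. */
	printf("%d\n", ring_distance(110, 100, size));	/* 10 */

	/* Wrap: hardware wrapped to 5 while software is still at 1020. */
	printf("%d\n", ring_distance(5, 1020, size));	/* 9 */
	return 0;
}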
414 | /** | |
415 | * reclaim_completed_tx - reclaims completed Tx descriptors | |
416 | * @adap: the adapter | |
417 | * @q: the Tx queue to reclaim completed descriptors from | |
418 | * @unmap: whether the buffers should be unmapped for DMA | |
419 | * | |
420 | * Reclaims Tx descriptors that the SGE has indicated it has processed, | |
421 | * and frees the associated buffers if possible. Called with the Tx | |
422 | * queue locked. | |
423 | */ | |
424 | static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, | |
425 | bool unmap) | |
426 | { | |
427 | int avail = reclaimable(q); | |
428 | ||
429 | if (avail) { | |
430 | /* | |
431 | * Limit the amount of clean up work we do at a time to keep | |
432 | * the Tx lock hold time O(1). | |
433 | */ | |
434 | if (avail > MAX_TX_RECLAIM) | |
435 | avail = MAX_TX_RECLAIM; | |
436 | ||
437 | free_tx_desc(adap, q, avail, unmap); | |
438 | q->in_use -= avail; | |
439 | } | |
440 | } | |
441 | ||
52367a76 VP |
442 | static inline int get_buf_size(struct adapter *adapter, |
443 | const struct rx_sw_desc *d) | |
fd3a4790 | 444 | { |
52367a76 VP |
445 | struct sge *s = &adapter->sge; |
446 | unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; | |
447 | int buf_size; | |
448 | ||
449 | switch (rx_buf_size_idx) { | |
450 | case RX_SMALL_PG_BUF: | |
451 | buf_size = PAGE_SIZE; | |
452 | break; | |
453 | ||
454 | case RX_LARGE_PG_BUF: | |
455 | buf_size = PAGE_SIZE << s->fl_pg_order; | |
456 | break; | |
457 | ||
458 | case RX_SMALL_MTU_BUF: | |
459 | buf_size = FL_MTU_SMALL_BUFSIZE(adapter); | |
460 | break; | |
461 | ||
462 | case RX_LARGE_MTU_BUF: | |
463 | buf_size = FL_MTU_LARGE_BUFSIZE(adapter); | |
464 | break; | |
465 | ||
466 | default: | |
467 | BUG_ON(1); | |
468 | } | |
469 | ||
470 | return buf_size; | |
fd3a4790 DM |
471 | } |
472 | ||
473 | /** | |
474 | * free_rx_bufs - free the Rx buffers on an SGE free list | |
475 | * @adap: the adapter | |
476 | * @q: the SGE free list to free buffers from | |
477 | * @n: how many buffers to free | |
478 | * | |
479 | * Release the next @n buffers on an SGE free-buffer Rx queue. The | |
480 | * buffers must be made inaccessible to HW before calling this function. | |
481 | */ | |
482 | static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) | |
483 | { | |
484 | while (n--) { | |
485 | struct rx_sw_desc *d = &q->sdesc[q->cidx]; | |
486 | ||
487 | if (is_buf_mapped(d)) | |
488 | dma_unmap_page(adap->pdev_dev, get_buf_addr(d), | |
52367a76 VP |
489 | get_buf_size(adap, d), |
490 | PCI_DMA_FROMDEVICE); | |
fd3a4790 DM |
491 | put_page(d->page); |
492 | d->page = NULL; | |
493 | if (++q->cidx == q->size) | |
494 | q->cidx = 0; | |
495 | q->avail--; | |
496 | } | |
497 | } | |
498 | ||
499 | /** | |
500 | * unmap_rx_buf - unmap the current Rx buffer on an SGE free list | |
501 | * @adap: the adapter | |
502 | * @q: the SGE free list | |
503 | * | |
504 | * Unmap the current buffer on an SGE free-buffer Rx queue. The | |
505 | * buffer must be made inaccessible to HW before calling this function. | |
506 | * | |
507 | * This is similar to @free_rx_bufs above but does not free the buffer. | |
508 | * Do note that the FL still loses any further access to the buffer. | |
509 | */ | |
510 | static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) | |
511 | { | |
512 | struct rx_sw_desc *d = &q->sdesc[q->cidx]; | |
513 | ||
514 | if (is_buf_mapped(d)) | |
515 | dma_unmap_page(adap->pdev_dev, get_buf_addr(d), | |
52367a76 | 516 | get_buf_size(adap, d), PCI_DMA_FROMDEVICE); |
fd3a4790 DM |
517 | d->page = NULL; |
518 | if (++q->cidx == q->size) | |
519 | q->cidx = 0; | |
520 | q->avail--; | |
521 | } | |
522 | ||
523 | static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) | |
524 | { | |
0a57a536 | 525 | u32 val; |
fd3a4790 | 526 | if (q->pend_cred >= 8) { |
f612b815 HS |
527 | if (is_t4(adap->params.chip)) |
528 | val = PIDX_V(q->pend_cred / 8); | |
529 | else | |
530 | val = PIDX_T5_V(q->pend_cred / 8) | | |
531 | DBTYPE_F; | |
532 | val |= DBPRIO_F; | |
1ecc7b7a HS |
533 | |
534 | /* Make sure all memory writes to the Free List queue are | |
535 | * committed before we tell the hardware about them. | |
536 | */ | |
fd3a4790 | 537 | wmb(); |
d63a6dcf | 538 | |
df64e4d3 HS |
539 | /* If we don't have access to the new User Doorbell (T5+), use |
540 | * the old doorbell mechanism; otherwise use the new BAR2 | |
541 | * mechanism. | |
d63a6dcf | 542 | */ |
df64e4d3 | 543 | if (unlikely(q->bar2_addr == NULL)) { |
f612b815 HS |
544 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), |
545 | val | QID_V(q->cntxt_id)); | |
d63a6dcf | 546 | } else { |
f612b815 | 547 | writel(val | QID_V(q->bar2_qid), |
df64e4d3 | 548 | q->bar2_addr + SGE_UDB_KDOORBELL); |
d63a6dcf HS |
549 | |
550 | /* This Write memory Barrier will force the write to | |
551 | * the User Doorbell area to be flushed. | |
552 | */ | |
553 | wmb(); | |
554 | } | |
fd3a4790 DM |
555 | q->pend_cred &= 7; |
556 | } | |
557 | } | |
558 | ||
559 | static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, | |
560 | dma_addr_t mapping) | |
561 | { | |
562 | sd->page = pg; | |
563 | sd->dma_addr = mapping; /* includes size low bits */ | |
564 | } | |
565 | ||
566 | /** | |
567 | * refill_fl - refill an SGE Rx buffer ring | |
568 | * @adap: the adapter | |
569 | * @q: the ring to refill | |
570 | * @n: the number of new buffers to allocate | |
571 | * @gfp: the gfp flags for the allocations | |
572 | * | |
573 | * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, | |
574 | * allocated with the supplied gfp flags. The caller must ensure that | |
575 | * @n does not exceed the queue's capacity. If afterwards the queue is | |
576 | * found critically low, it is marked as starving in the bitmap of starving FLs. | |
577 | * | |
578 | * Returns the number of buffers allocated. | |
579 | */ | |
580 | static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, | |
581 | gfp_t gfp) | |
582 | { | |
52367a76 | 583 | struct sge *s = &adap->sge; |
fd3a4790 DM |
584 | struct page *pg; |
585 | dma_addr_t mapping; | |
586 | unsigned int cred = q->avail; | |
587 | __be64 *d = &q->desc[q->pidx]; | |
588 | struct rx_sw_desc *sd = &q->sdesc[q->pidx]; | |
d52ce920 | 589 | int node; |
fd3a4790 | 590 | |
5b377d11 HS |
591 | #ifdef CONFIG_DEBUG_FS |
592 | if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) | |
593 | goto out; | |
594 | #endif | |
595 | ||
aa9cd31c | 596 | gfp |= __GFP_NOWARN; |
d52ce920 | 597 | node = dev_to_node(adap->pdev_dev); |
fd3a4790 | 598 | |
52367a76 VP |
599 | if (s->fl_pg_order == 0) |
600 | goto alloc_small_pages; | |
601 | ||
fd3a4790 DM |
602 | /* |
603 | * Prefer large buffers | |
604 | */ | |
605 | while (n) { | |
d52ce920 | 606 | pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); |
fd3a4790 DM |
607 | if (unlikely(!pg)) { |
608 | q->large_alloc_failed++; | |
609 | break; /* fall back to single pages */ | |
610 | } | |
611 | ||
612 | mapping = dma_map_page(adap->pdev_dev, pg, 0, | |
52367a76 | 613 | PAGE_SIZE << s->fl_pg_order, |
fd3a4790 DM |
614 | PCI_DMA_FROMDEVICE); |
615 | if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { | |
52367a76 | 616 | __free_pages(pg, s->fl_pg_order); |
fd3a4790 DM |
617 | goto out; /* do not try small pages for this error */ |
618 | } | |
52367a76 | 619 | mapping |= RX_LARGE_PG_BUF; |
fd3a4790 DM |
620 | *d++ = cpu_to_be64(mapping); |
621 | ||
622 | set_rx_sw_desc(sd, pg, mapping); | |
623 | sd++; | |
624 | ||
625 | q->avail++; | |
626 | if (++q->pidx == q->size) { | |
627 | q->pidx = 0; | |
628 | sd = q->sdesc; | |
629 | d = q->desc; | |
630 | } | |
631 | n--; | |
632 | } | |
fd3a4790 | 633 | |
52367a76 | 634 | alloc_small_pages: |
fd3a4790 | 635 | while (n--) { |
d52ce920 | 636 | pg = alloc_pages_node(node, gfp, 0); |
fd3a4790 DM |
637 | if (unlikely(!pg)) { |
638 | q->alloc_failed++; | |
639 | break; | |
640 | } | |
641 | ||
642 | mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, | |
643 | PCI_DMA_FROMDEVICE); | |
644 | if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { | |
1f2149c1 | 645 | put_page(pg); |
fd3a4790 DM |
646 | goto out; |
647 | } | |
648 | *d++ = cpu_to_be64(mapping); | |
649 | ||
650 | set_rx_sw_desc(sd, pg, mapping); | |
651 | sd++; | |
652 | ||
653 | q->avail++; | |
654 | if (++q->pidx == q->size) { | |
655 | q->pidx = 0; | |
656 | sd = q->sdesc; | |
657 | d = q->desc; | |
658 | } | |
659 | } | |
660 | ||
661 | out: cred = q->avail - cred; | |
662 | q->pend_cred += cred; | |
663 | ring_fl_db(adap, q); | |
664 | ||
c098b026 | 665 | if (unlikely(fl_starving(adap, q))) { |
fd3a4790 | 666 | smp_wmb(); |
e46dab4d DM |
667 | set_bit(q->cntxt_id - adap->sge.egr_start, |
668 | adap->sge.starving_fl); | |
fd3a4790 DM |
669 | } |
670 | ||
671 | return cred; | |
672 | } | |
673 | ||
674 | static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) | |
675 | { | |
676 | refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), | |
677 | GFP_ATOMIC); | |
678 | } | |
679 | ||
680 | /** | |
681 | * alloc_ring - allocate resources for an SGE descriptor ring | |
682 | * @dev: the PCI device's core device | |
683 | * @nelem: the number of descriptors | |
684 | * @elem_size: the size of each descriptor | |
685 | * @sw_size: the size of the SW state associated with each ring element | |
686 | * @phys: the physical address of the allocated ring | |
687 | * @metadata: address of the array holding the SW state for the ring | |
688 | * @stat_size: extra space in HW ring for status information | |
ad6bad3e | 689 | * @node: preferred node for memory allocations |
fd3a4790 DM |
690 | * |
691 | * Allocates resources for an SGE descriptor ring, such as Tx queues, | |
692 | * free buffer lists, or response queues. Each SGE ring requires | |
693 | * space for its HW descriptors plus, optionally, space for the SW state | |
694 | * associated with each HW entry (the metadata). The function returns | |
695 | * three values: the virtual address for the HW ring (the return value | |
696 | * of the function), the bus address of the HW ring, and the address | |
697 | * of the SW ring. | |
698 | */ | |
699 | static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, | |
700 | size_t sw_size, dma_addr_t *phys, void *metadata, | |
ad6bad3e | 701 | size_t stat_size, int node) |
fd3a4790 DM |
702 | { |
703 | size_t len = nelem * elem_size + stat_size; | |
704 | void *s = NULL; | |
705 | void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); | |
706 | ||
707 | if (!p) | |
708 | return NULL; | |
709 | if (sw_size) { | |
ad6bad3e | 710 | s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node); |
fd3a4790 DM |
711 | |
712 | if (!s) { | |
713 | dma_free_coherent(dev, len, p, *phys); | |
714 | return NULL; | |
715 | } | |
716 | } | |
717 | if (metadata) | |
718 | *(void **)metadata = s; | |
719 | memset(p, 0, len); | |
720 | return p; | |
721 | } | |
722 | ||
723 | /** | |
724 | * sgl_len - calculates the size of an SGL of the given capacity | |
725 | * @n: the number of SGL entries | |
726 | * | |
727 | * Calculates the number of flits needed for a scatter/gather list that | |
728 | * can hold the given number of entries. | |
729 | */ | |
730 | static inline unsigned int sgl_len(unsigned int n) | |
731 | { | |
0aac3f56 HS |
732 | /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA |
733 | * addresses. The DSGL Work Request starts off with a 32-bit DSGL | |
734 | * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, | |
735 | * repeated sequences of { Length[i], Length[i+1], Address[i], | |
736 | * Address[i+1] } (this ensures that all addresses are on 64-bit | |
737 | * boundaries). If N is even, then Length[N+1] should be set to 0 and | |
738 | * Address[N+1] is omitted. | |
739 | * | |
740 | * The following calculation incorporates all of the above. It's | |
741 | * somewhat hard to follow but, briefly: the "+2" accounts for the | |
742 | * first two flits which include the DSGL header, Length0 and | |
743 | * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 | |
744 | * flits for every pair of the remaining N) +1 if (n-1) is odd; and | |
745 | * finally the "+((n-1)&1)" adds the one remaining flit needed if | |
746 | * (n-1) is odd ... | |
747 | */ | |
fd3a4790 DM |
748 | n--; |
749 | return (3 * n) / 2 + (n & 1) + 2; | |
750 | } | |
751 | ||
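/*
 * Illustrative sketch (not part of the driver): sgl_len()'s flit count,
 * cross-checked against the DSGL layout described above (header + Length0 +
 * Address0 take 2 flits, each remaining pair of entries takes 3 flits, and
 * a leftover odd entry takes 2 more).  Entry counts are example values.
 */
#include <stdio.h>

static unsigned int ex_sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

int main(void)
{
	/* 1 entry -> 2 flits, 2 -> 4, 3 -> 5, 4 -> 7, 5 -> 8. */
	for (unsigned int n = 1; n <= 5; n++)
		printf("sgl_len(%u) = %u flits\n", n, ex_sgl_len(n));
	return 0;
}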
752 | /** | |
753 | * flits_to_desc - returns the num of Tx descriptors for the given flits | |
754 | * @n: the number of flits | |
755 | * | |
756 | * Returns the number of Tx descriptors needed for the supplied number | |
757 | * of flits. | |
758 | */ | |
759 | static inline unsigned int flits_to_desc(unsigned int n) | |
760 | { | |
761 | BUG_ON(n > SGE_MAX_WR_LEN / 8); | |
762 | return DIV_ROUND_UP(n, 8); | |
763 | } | |
764 | ||
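/*
 * Illustrative sketch (not part of the driver): converting a Work Request's
 * flit count into Tx descriptors, as flits_to_desc() does.  A flit is 8
 * bytes and each Tx descriptor holds 8 flits, so a WR of F flits occupies
 * ceil(F / 8) descriptors.  The flit counts below are arbitrary examples.
 */
#include <stdio.h>

static unsigned int ex_flits_to_desc(unsigned int flits)
{
	return (flits + 7) / 8;		/* DIV_ROUND_UP(flits, 8) */
}

int main(void)
{
	unsigned int examples[] = { 3, 8, 9, 17 };

	for (unsigned int i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
		printf("%u flits -> %u descriptor(s)\n",
		       examples[i], ex_flits_to_desc(examples[i]));
	return 0;
}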
765 | /** | |
766 | * is_eth_imm - can an Ethernet packet be sent as immediate data? | |
767 | * @skb: the packet | |
768 | * | |
769 | * Returns whether an Ethernet packet is small enough to fit as | |
0034b298 | 770 | * immediate data. Return value corresponds to headroom required. |
fd3a4790 DM |
771 | */ |
772 | static inline int is_eth_imm(const struct sk_buff *skb) | |
773 | { | |
0034b298 KS |
774 | int hdrlen = skb_shinfo(skb)->gso_size ? |
775 | sizeof(struct cpl_tx_pkt_lso_core) : 0; | |
776 | ||
777 | hdrlen += sizeof(struct cpl_tx_pkt); | |
778 | if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) | |
779 | return hdrlen; | |
780 | return 0; | |
fd3a4790 DM |
781 | } |
782 | ||
783 | /** | |
784 | * calc_tx_flits - calculate the number of flits for a packet Tx WR | |
785 | * @skb: the packet | |
786 | * | |
787 | * Returns the number of flits needed for a Tx WR for the given Ethernet | |
788 | * packet, including the needed WR and CPL headers. | |
789 | */ | |
790 | static inline unsigned int calc_tx_flits(const struct sk_buff *skb) | |
791 | { | |
792 | unsigned int flits; | |
0034b298 | 793 | int hdrlen = is_eth_imm(skb); |
fd3a4790 | 794 | |
0aac3f56 HS |
795 | /* If the skb is small enough, we can pump it out as a work request |
796 | * with only immediate data. In that case we just have to have the | |
797 | * TX Packet header plus the skb data in the Work Request. | |
798 | */ | |
799 | ||
0034b298 KS |
800 | if (hdrlen) |
801 | return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); | |
fd3a4790 | 802 | |
0aac3f56 HS |
803 | /* Otherwise, we're going to have to construct a Scatter gather list |
804 | * of the skb body and fragments. We also include the flits necessary | |
805 | * for the TX Packet Work Request and CPL. We always have a firmware | |
806 | * Write Header (incorporated as part of the cpl_tx_pkt_lso and | |
807 | * cpl_tx_pkt structures), followed by either a TX Packet Write CPL | |
808 | * message or, if we're doing a Large Send Offload, an LSO CPL message | |
809 | * with an embedded TX Packet Write CPL message. | |
810 | */ | |
fd3a4790 DM |
811 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; |
812 | if (skb_shinfo(skb)->gso_size) | |
0aac3f56 HS |
813 | flits += (sizeof(struct fw_eth_tx_pkt_wr) + |
814 | sizeof(struct cpl_tx_pkt_lso_core) + | |
815 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); | |
816 | else | |
817 | flits += (sizeof(struct fw_eth_tx_pkt_wr) + | |
818 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); | |
fd3a4790 DM |
819 | return flits; |
820 | } | |
821 | ||
822 | /** | |
823 | * calc_tx_descs - calculate the number of Tx descriptors for a packet | |
824 | * @skb: the packet | |
825 | * | |
826 | * Returns the number of Tx descriptors needed for the given Ethernet | |
827 | * packet, including the needed WR and CPL headers. | |
828 | */ | |
829 | static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | |
830 | { | |
831 | return flits_to_desc(calc_tx_flits(skb)); | |
832 | } | |
833 | ||
834 | /** | |
835 | * write_sgl - populate a scatter/gather list for a packet | |
836 | * @skb: the packet | |
837 | * @q: the Tx queue we are writing into | |
838 | * @sgl: starting location for writing the SGL | |
839 | * @end: points right after the end of the SGL | |
840 | * @start: start offset into skb main-body data to include in the SGL | |
841 | * @addr: the list of bus addresses for the SGL elements | |
842 | * | |
843 | * Generates a gather list for the buffers that make up a packet. | |
844 | * The caller must provide adequate space for the SGL that will be written. | |
845 | * The SGL includes all of the packet's page fragments and the data in its | |
846 | * main body except for the first @start bytes. @sgl must be 16-byte | |
847 | * aligned and within a Tx descriptor with available space. @end points | |
848 | * right after the end of the SGL but does not account for any potential | |
849 | * wrap around, i.e., @end > @sgl. | |
850 | */ | |
851 | static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | |
852 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, | |
853 | const dma_addr_t *addr) | |
854 | { | |
855 | unsigned int i, len; | |
856 | struct ulptx_sge_pair *to; | |
857 | const struct skb_shared_info *si = skb_shinfo(skb); | |
858 | unsigned int nfrags = si->nr_frags; | |
859 | struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; | |
860 | ||
861 | len = skb_headlen(skb) - start; | |
862 | if (likely(len)) { | |
863 | sgl->len0 = htonl(len); | |
864 | sgl->addr0 = cpu_to_be64(addr[0] + start); | |
865 | nfrags++; | |
866 | } else { | |
9e903e08 | 867 | sgl->len0 = htonl(skb_frag_size(&si->frags[0])); |
fd3a4790 DM |
868 | sgl->addr0 = cpu_to_be64(addr[1]); |
869 | } | |
870 | ||
bdc590b9 HS |
871 | sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | |
872 | ULPTX_NSGE_V(nfrags)); | |
fd3a4790 DM |
873 | if (likely(--nfrags == 0)) |
874 | return; | |
875 | /* | |
876 | * Most of the complexity below deals with the possibility we hit the | |
877 | * end of the queue in the middle of writing the SGL. For this case | |
878 | * only we create the SGL in a temporary buffer and then copy it. | |
879 | */ | |
880 | to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; | |
881 | ||
882 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { | |
9e903e08 ED |
883 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
884 | to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); | |
fd3a4790 DM |
885 | to->addr[0] = cpu_to_be64(addr[i]); |
886 | to->addr[1] = cpu_to_be64(addr[++i]); | |
887 | } | |
888 | if (nfrags) { | |
9e903e08 | 889 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
fd3a4790 DM |
890 | to->len[1] = cpu_to_be32(0); |
891 | to->addr[0] = cpu_to_be64(addr[i + 1]); | |
892 | } | |
893 | if (unlikely((u8 *)end > (u8 *)q->stat)) { | |
894 | unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; | |
895 | ||
896 | if (likely(part0)) | |
897 | memcpy(sgl->sge, buf, part0); | |
898 | part1 = (u8 *)end - (u8 *)q->stat; | |
899 | memcpy(q->desc, (u8 *)buf + part0, part1); | |
900 | end = (void *)q->desc + part1; | |
901 | } | |
902 | if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ | |
64699336 | 903 | *end = 0; |
fd3a4790 DM |
904 | } |
905 | ||
df64e4d3 HS |
906 | /* This function copies a 64-byte coalesced work request to |
907 | * memory-mapped BAR2 space. For a coalesced WR, the SGE fetches | |
908 | * the data from the FIFO instead of from the Host. | |
22adfe0a | 909 | */ |
df64e4d3 | 910 | static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) |
22adfe0a | 911 | { |
df64e4d3 | 912 | int count = 8; |
22adfe0a SR |
913 | |
914 | while (count) { | |
915 | writeq(*src, dst); | |
916 | src++; | |
917 | dst++; | |
918 | count--; | |
919 | } | |
920 | } | |
921 | ||
fd3a4790 DM |
922 | /** |
923 | * ring_tx_db - check and potentially ring a Tx queue's doorbell | |
924 | * @adap: the adapter | |
925 | * @q: the Tx queue | |
926 | * @n: number of new descriptors to give to HW | |
927 | * | |
928 | * Ring the doorbell for a Tx queue. | |
929 | */ | |
930 | static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | |
931 | { | |
1ecc7b7a HS |
932 | /* Make sure that all writes to the TX Descriptors are committed |
933 | * before we tell the hardware about them. | |
934 | */ | |
935 | wmb(); | |
d63a6dcf | 936 | |
df64e4d3 HS |
937 | /* If we don't have access to the new User Doorbell (T5+), use the old |
938 | * doorbell mechanism; otherwise use the new BAR2 mechanism. | |
939 | */ | |
940 | if (unlikely(q->bar2_addr == NULL)) { | |
f612b815 | 941 | u32 val = PIDX_V(n); |
d63a6dcf HS |
942 | unsigned long flags; |
943 | ||
944 | /* For T4 we need to participate in the Doorbell Recovery | |
945 | * mechanism. | |
946 | */ | |
947 | spin_lock_irqsave(&q->db_lock, flags); | |
948 | if (!q->db_disabled) | |
f612b815 HS |
949 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), |
950 | QID_V(q->cntxt_id) | val); | |
d63a6dcf HS |
951 | else |
952 | q->db_pidx_inc += n; | |
953 | q->db_pidx = q->pidx; | |
954 | spin_unlock_irqrestore(&q->db_lock, flags); | |
955 | } else { | |
f612b815 | 956 | u32 val = PIDX_T5_V(n); |
d63a6dcf HS |
957 | |
958 | /* T4 and later chips share the same PIDX field offset within | |
959 | * the doorbell, but T5 and later shrank the field in order to | |
960 | * gain a bit for Doorbell Priority. The field was absurdly | |
961 | * large in the first place (14 bits) so we just use the T5 | |
962 | * and later limits and warn if a Queue ID is too large. | |
963 | */ | |
f612b815 | 964 | WARN_ON(val & DBPRIO_F); |
d63a6dcf | 965 | |
df64e4d3 HS |
966 | /* If we're only writing a single TX Descriptor and we can use |
967 | * Inferred QID registers, we can use the Write Combining | |
968 | * Gather Buffer; otherwise we use the simple doorbell. | |
d63a6dcf | 969 | */ |
df64e4d3 | 970 | if (n == 1 && q->bar2_qid == 0) { |
d63a6dcf HS |
971 | int index = (q->pidx |
972 | ? (q->pidx - 1) | |
973 | : (q->size - 1)); | |
df64e4d3 | 974 | u64 *wr = (u64 *)&q->desc[index]; |
d63a6dcf | 975 | |
df64e4d3 HS |
976 | cxgb_pio_copy((u64 __iomem *) |
977 | (q->bar2_addr + SGE_UDB_WCDOORBELL), | |
978 | wr); | |
22adfe0a | 979 | } else { |
f612b815 | 980 | writel(val | QID_V(q->bar2_qid), |
df64e4d3 | 981 | q->bar2_addr + SGE_UDB_KDOORBELL); |
22adfe0a | 982 | } |
d63a6dcf HS |
983 | |
984 | /* This Write Memory Barrier will force the write to the User | |
985 | * Doorbell area to be flushed. This is needed to prevent | |
986 | * writes on different CPUs for the same queue from hitting | |
987 | * the adapter out of order. This is required when some Work | |
988 | * Requests take the Write Combine Gather Buffer path (user | |
989 | * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some | |
990 | * take the traditional path where we simply increment the | |
991 | * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the | |
992 | * hardware DMA read the actual Work Request. | |
993 | */ | |
994 | wmb(); | |
995 | } | |
fd3a4790 DM |
996 | } |
997 | ||
998 | /** | |
999 | * inline_tx_skb - inline a packet's data into Tx descriptors | |
1000 | * @skb: the packet | |
1001 | * @q: the Tx queue where the packet will be inlined | |
1002 | * @pos: starting position in the Tx queue where to inline the packet | |
1003 | * | |
1004 | * Inline a packet's contents directly into Tx descriptors, starting at | |
1005 | * the given position within the Tx DMA ring. | |
1006 | * Most of the complexity of this operation is dealing with wrap arounds | |
1007 | * in the middle of the packet we want to inline. | |
1008 | */ | |
1009 | static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, | |
1010 | void *pos) | |
1011 | { | |
1012 | u64 *p; | |
1013 | int left = (void *)q->stat - pos; | |
1014 | ||
1015 | if (likely(skb->len <= left)) { | |
1016 | if (likely(!skb->data_len)) | |
1017 | skb_copy_from_linear_data(skb, pos, skb->len); | |
1018 | else | |
1019 | skb_copy_bits(skb, 0, pos, skb->len); | |
1020 | pos += skb->len; | |
1021 | } else { | |
1022 | skb_copy_bits(skb, 0, pos, left); | |
1023 | skb_copy_bits(skb, left, q->desc, skb->len - left); | |
1024 | pos = (void *)q->desc + (skb->len - left); | |
1025 | } | |
1026 | ||
1027 | /* 0-pad to multiple of 16 */ | |
1028 | p = PTR_ALIGN(pos, 8); | |
1029 | if ((uintptr_t)p & 8) | |
1030 | *p = 0; | |
1031 | } | |
1032 | ||
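/*
 * Illustrative sketch (not part of the driver): the wrap-around handling in
 * inline_tx_skb() -- copying a buffer into a ring starting at some offset
 * and continuing from the start of the ring once the end is reached.  Ring
 * and payload sizes are arbitrary example values.
 */
#include <stdio.h>
#include <string.h>

static void ring_copy(char *ring, size_t ring_size, size_t pos,
		      const char *src, size_t len)
{
	size_t left = ring_size - pos;	/* room before the end of the ring */

	if (len <= left) {
		memcpy(ring + pos, src, len);
	} else {
		memcpy(ring + pos, src, left);		/* tail piece */
		memcpy(ring, src + left, len - left);	/* wrapped piece */
	}
}

int main(void)
{
	char ring[16];

	memset(ring, '.', sizeof(ring));
	ring_copy(ring, sizeof(ring), 12, "ABCDEFG", 7);  /* wraps after 4 bytes */
	printf("%.16s\n", ring);	/* EFG.........ABCD */
	return 0;
}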
1033 | /* | |
1034 | * Figure out what HW csum a packet wants and return the appropriate control | |
1035 | * bits. | |
1036 | */ | |
1037 | static u64 hwcsum(const struct sk_buff *skb) | |
1038 | { | |
1039 | int csum_type; | |
1040 | const struct iphdr *iph = ip_hdr(skb); | |
1041 | ||
1042 | if (iph->version == 4) { | |
1043 | if (iph->protocol == IPPROTO_TCP) | |
1044 | csum_type = TX_CSUM_TCPIP; | |
1045 | else if (iph->protocol == IPPROTO_UDP) | |
1046 | csum_type = TX_CSUM_UDPIP; | |
1047 | else { | |
1048 | nocsum: /* | |
1049 | * unknown protocol, disable HW csum | |
1050 | * and hope a bad packet is detected | |
1051 | */ | |
1ecc7b7a | 1052 | return TXPKT_L4CSUM_DIS_F; |
fd3a4790 DM |
1053 | } |
1054 | } else { | |
1055 | /* | |
1056 | * this doesn't work with extension headers | |
1057 | */ | |
1058 | const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; | |
1059 | ||
1060 | if (ip6h->nexthdr == IPPROTO_TCP) | |
1061 | csum_type = TX_CSUM_TCPIP6; | |
1062 | else if (ip6h->nexthdr == IPPROTO_UDP) | |
1063 | csum_type = TX_CSUM_UDPIP6; | |
1064 | else | |
1065 | goto nocsum; | |
1066 | } | |
1067 | ||
1068 | if (likely(csum_type >= TX_CSUM_TCPIP)) | |
1ecc7b7a HS |
1069 | return TXPKT_CSUM_TYPE_V(csum_type) | |
1070 | TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) | | |
1071 | TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN); | |
fd3a4790 DM |
1072 | else { |
1073 | int start = skb_transport_offset(skb); | |
1074 | ||
1ecc7b7a HS |
1075 | return TXPKT_CSUM_TYPE_V(csum_type) | |
1076 | TXPKT_CSUM_START_V(start) | | |
1077 | TXPKT_CSUM_LOC_V(start + skb->csum_offset); | |
fd3a4790 DM |
1078 | } |
1079 | } | |
1080 | ||
1081 | static void eth_txq_stop(struct sge_eth_txq *q) | |
1082 | { | |
1083 | netif_tx_stop_queue(q->txq); | |
1084 | q->q.stops++; | |
1085 | } | |
1086 | ||
1087 | static inline void txq_advance(struct sge_txq *q, unsigned int n) | |
1088 | { | |
1089 | q->in_use += n; | |
1090 | q->pidx += n; | |
1091 | if (q->pidx >= q->size) | |
1092 | q->pidx -= q->size; | |
1093 | } | |
1094 | ||
84a200b3 VP |
1095 | #ifdef CONFIG_CHELSIO_T4_FCOE |
1096 | static inline int | |
1097 | cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, | |
1098 | const struct port_info *pi, u64 *cntrl) | |
1099 | { | |
1100 | const struct cxgb_fcoe *fcoe = &pi->fcoe; | |
1101 | ||
1102 | if (!(fcoe->flags & CXGB_FCOE_ENABLED)) | |
1103 | return 0; | |
1104 | ||
1105 | if (skb->protocol != htons(ETH_P_FCOE)) | |
1106 | return 0; | |
1107 | ||
1108 | skb_reset_mac_header(skb); | |
1109 | skb->mac_len = sizeof(struct ethhdr); | |
1110 | ||
1111 | skb_set_network_header(skb, skb->mac_len); | |
1112 | skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); | |
1113 | ||
1114 | if (!cxgb_fcoe_sof_eof_supported(adap, skb)) | |
1115 | return -ENOTSUPP; | |
1116 | ||
1117 | /* FC CRC offload */ | |
1ecc7b7a HS |
1118 | *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) | |
1119 | TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F | | |
1120 | TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) | | |
1121 | TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) | | |
1122 | TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END); | |
84a200b3 VP |
1123 | return 0; |
1124 | } | |
1125 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
1126 | ||
fd3a4790 DM |
1127 | /** |
1128 | * t4_eth_xmit - add a packet to an Ethernet Tx queue | |
1129 | * @skb: the packet | |
1130 | * @dev: the egress net device | |
1131 | * | |
1132 | * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. | |
1133 | */ | |
1134 | netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |
1135 | { | |
1136 | u32 wr_mid; | |
1137 | u64 cntrl, *end; | |
1138 | int qidx, credits; | |
1139 | unsigned int flits, ndesc; | |
1140 | struct adapter *adap; | |
1141 | struct sge_eth_txq *q; | |
1142 | const struct port_info *pi; | |
1143 | struct fw_eth_tx_pkt_wr *wr; | |
1144 | struct cpl_tx_pkt_core *cpl; | |
1145 | const struct skb_shared_info *ssi; | |
1146 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | |
0034b298 | 1147 | bool immediate = false; |
637d3e99 | 1148 | int len, max_pkt_len; |
84a200b3 VP |
1149 | #ifdef CONFIG_CHELSIO_T4_FCOE |
1150 | int err; | |
1151 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
fd3a4790 DM |
1152 | |
1153 | /* | |
1154 | * The chip's minimum packet length is 10 octets, but play it safe and reject | |
1155 | * anything shorter than an Ethernet header. | |
1156 | */ | |
1157 | if (unlikely(skb->len < ETH_HLEN)) { | |
a7525198 | 1158 | out_free: dev_kfree_skb_any(skb); |
fd3a4790 DM |
1159 | return NETDEV_TX_OK; |
1160 | } | |
1161 | ||
637d3e99 HS |
1162 | /* Discard the packet if the length is greater than the MTU */ |
1163 | max_pkt_len = ETH_HLEN + dev->mtu; | |
1164 | if (skb_vlan_tag_present(skb)) | |
1165 | max_pkt_len += VLAN_HLEN; | |
1166 | if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) | |
1167 | goto out_free; | |
1168 | ||
fd3a4790 DM |
1169 | pi = netdev_priv(dev); |
1170 | adap = pi->adapter; | |
1171 | qidx = skb_get_queue_mapping(skb); | |
1172 | q = &adap->sge.ethtxq[qidx + pi->first_qset]; | |
1173 | ||
1174 | reclaim_completed_tx(adap, &q->q, true); | |
1ecc7b7a | 1175 | cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; |
84a200b3 VP |
1176 | |
1177 | #ifdef CONFIG_CHELSIO_T4_FCOE | |
1178 | err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); | |
1179 | if (unlikely(err == -ENOTSUPP)) | |
1180 | goto out_free; | |
1181 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
fd3a4790 DM |
1182 | |
1183 | flits = calc_tx_flits(skb); | |
1184 | ndesc = flits_to_desc(flits); | |
1185 | credits = txq_avail(&q->q) - ndesc; | |
1186 | ||
1187 | if (unlikely(credits < 0)) { | |
1188 | eth_txq_stop(q); | |
1189 | dev_err(adap->pdev_dev, | |
1190 | "%s: Tx ring %u full while queue awake!\n", | |
1191 | dev->name, qidx); | |
1192 | return NETDEV_TX_BUSY; | |
1193 | } | |
1194 | ||
0034b298 KS |
1195 | if (is_eth_imm(skb)) |
1196 | immediate = true; | |
1197 | ||
1198 | if (!immediate && | |
fd3a4790 DM |
1199 | unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { |
1200 | q->mapping_err++; | |
1201 | goto out_free; | |
1202 | } | |
1203 | ||
e2ac9628 | 1204 | wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); |
fd3a4790 DM |
1205 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { |
1206 | eth_txq_stop(q); | |
e2ac9628 | 1207 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; |
fd3a4790 DM |
1208 | } |
1209 | ||
1210 | wr = (void *)&q->q.desc[q->q.pidx]; | |
1211 | wr->equiq_to_len16 = htonl(wr_mid); | |
1212 | wr->r3 = cpu_to_be64(0); | |
1213 | end = (u64 *)wr + flits; | |
1214 | ||
0034b298 | 1215 | len = immediate ? skb->len : 0; |
fd3a4790 DM |
1216 | ssi = skb_shinfo(skb); |
1217 | if (ssi->gso_size) { | |
625ac6ae | 1218 | struct cpl_tx_pkt_lso *lso = (void *)wr; |
fd3a4790 DM |
1219 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; |
1220 | int l3hdr_len = skb_network_header_len(skb); | |
1221 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; | |
1222 | ||
0034b298 | 1223 | len += sizeof(*lso); |
e2ac9628 HS |
1224 | wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | |
1225 | FW_WR_IMMDLEN_V(len)); | |
1ecc7b7a HS |
1226 | lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | |
1227 | LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | | |
1228 | LSO_IPV6_V(v6) | | |
1229 | LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | | |
1230 | LSO_IPHDR_LEN_V(l3hdr_len / 4) | | |
1231 | LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); | |
625ac6ae DM |
1232 | lso->c.ipid_ofst = htons(0); |
1233 | lso->c.mss = htons(ssi->gso_size); | |
1234 | lso->c.seqno_offset = htonl(0); | |
7207c0d1 HS |
1235 | if (is_t4(adap->params.chip)) |
1236 | lso->c.len = htonl(skb->len); | |
1237 | else | |
1ecc7b7a | 1238 | lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); |
fd3a4790 | 1239 | cpl = (void *)(lso + 1); |
1ecc7b7a HS |
1240 | cntrl = TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | |
1241 | TXPKT_IPHDR_LEN_V(l3hdr_len) | | |
1242 | TXPKT_ETHHDR_LEN_V(eth_xtra_len); | |
fd3a4790 DM |
1243 | q->tso++; |
1244 | q->tx_cso += ssi->gso_segs; | |
1245 | } else { | |
ca71de6b | 1246 | len += sizeof(*cpl); |
e2ac9628 HS |
1247 | wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | |
1248 | FW_WR_IMMDLEN_V(len)); | |
fd3a4790 DM |
1249 | cpl = (void *)(wr + 1); |
1250 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
1ecc7b7a | 1251 | cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F; |
fd3a4790 | 1252 | q->tx_cso++; |
84a200b3 | 1253 | } |
fd3a4790 DM |
1254 | } |
1255 | ||
df8a39de | 1256 | if (skb_vlan_tag_present(skb)) { |
fd3a4790 | 1257 | q->vlan_ins++; |
1ecc7b7a | 1258 | cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); |
84a200b3 VP |
1259 | #ifdef CONFIG_CHELSIO_T4_FCOE |
1260 | if (skb->protocol == htons(ETH_P_FCOE)) | |
1ecc7b7a | 1261 | cntrl |= TXPKT_VLAN_V( |
84a200b3 VP |
1262 | ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); |
1263 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
fd3a4790 DM |
1264 | } |
1265 | ||
1ecc7b7a HS |
1266 | cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | |
1267 | TXPKT_INTF_V(pi->tx_chan) | | |
1268 | TXPKT_PF_V(adap->fn)); | |
fd3a4790 DM |
1269 | cpl->pack = htons(0); |
1270 | cpl->len = htons(skb->len); | |
1271 | cpl->ctrl1 = cpu_to_be64(cntrl); | |
1272 | ||
0034b298 | 1273 | if (immediate) { |
fd3a4790 | 1274 | inline_tx_skb(skb, &q->q, cpl + 1); |
a7525198 | 1275 | dev_consume_skb_any(skb); |
fd3a4790 DM |
1276 | } else { |
1277 | int last_desc; | |
1278 | ||
1279 | write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, | |
1280 | addr); | |
1281 | skb_orphan(skb); | |
1282 | ||
1283 | last_desc = q->q.pidx + ndesc - 1; | |
1284 | if (last_desc >= q->q.size) | |
1285 | last_desc -= q->q.size; | |
1286 | q->q.sdesc[last_desc].skb = skb; | |
1287 | q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); | |
1288 | } | |
1289 | ||
1290 | txq_advance(&q->q, ndesc); | |
1291 | ||
1292 | ring_tx_db(adap, &q->q, ndesc); | |
1293 | return NETDEV_TX_OK; | |
1294 | } | |
1295 | ||
1296 | /** | |
1297 | * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs | |
1298 | * @q: the SGE control Tx queue | |
1299 | * | |
1300 | * This is a variant of reclaim_completed_tx() that is used for Tx queues | |
1301 | * that send only immediate data (presently just the control queues) and | |
1302 | * thus do not have any sk_buffs to release. | |
1303 | */ | |
1304 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) | |
1305 | { | |
1306 | int hw_cidx = ntohs(q->stat->cidx); | |
1307 | int reclaim = hw_cidx - q->cidx; | |
1308 | ||
1309 | if (reclaim < 0) | |
1310 | reclaim += q->size; | |
1311 | ||
1312 | q->in_use -= reclaim; | |
1313 | q->cidx = hw_cidx; | |
1314 | } | |
1315 | ||
1316 | /** | |
1317 | * is_imm - check whether a packet can be sent as immediate data | |
1318 | * @skb: the packet | |
1319 | * | |
1320 | * Returns true if a packet can be sent as a WR with immediate data. | |
1321 | */ | |
1322 | static inline int is_imm(const struct sk_buff *skb) | |
1323 | { | |
1324 | return skb->len <= MAX_CTRL_WR_LEN; | |
1325 | } | |
1326 | ||
1327 | /** | |
1328 | * ctrlq_check_stop - check if a control queue is full and should stop | |
1329 | * @q: the queue | |
1330 | * @wr: most recent WR written to the queue | |
1331 | * | |
1332 | * Check if a control queue has become full and should be stopped. | |
1333 | * We clean up control queue descriptors very lazily, only when we are out. | |
1334 | * If the queue is still full after reclaiming any completed descriptors | |
1335 | * we suspend it and have the last WR wake it up. | |
1336 | */ | |
1337 | static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) | |
1338 | { | |
1339 | reclaim_completed_tx_imm(&q->q); | |
1340 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { | |
e2ac9628 | 1341 | wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); |
fd3a4790 DM |
1342 | q->q.stops++; |
1343 | q->full = 1; | |
1344 | } | |
1345 | } | |
1346 | ||
1347 | /** | |
1348 | * ctrl_xmit - send a packet through an SGE control Tx queue | |
1349 | * @q: the control queue | |
1350 | * @skb: the packet | |
1351 | * | |
1352 | * Send a packet through an SGE control Tx queue. Packets sent through | |
1353 | * a control queue must fit entirely as immediate data. | |
1354 | */ | |
1355 | static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) | |
1356 | { | |
1357 | unsigned int ndesc; | |
1358 | struct fw_wr_hdr *wr; | |
1359 | ||
1360 | if (unlikely(!is_imm(skb))) { | |
1361 | WARN_ON(1); | |
1362 | dev_kfree_skb(skb); | |
1363 | return NET_XMIT_DROP; | |
1364 | } | |
1365 | ||
1366 | ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); | |
1367 | spin_lock(&q->sendq.lock); | |
1368 | ||
1369 | if (unlikely(q->full)) { | |
1370 | skb->priority = ndesc; /* save for restart */ | |
1371 | __skb_queue_tail(&q->sendq, skb); | |
1372 | spin_unlock(&q->sendq.lock); | |
1373 | return NET_XMIT_CN; | |
1374 | } | |
1375 | ||
1376 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; | |
1377 | inline_tx_skb(skb, &q->q, wr); | |
1378 | ||
1379 | txq_advance(&q->q, ndesc); | |
1380 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) | |
1381 | ctrlq_check_stop(q, wr); | |
1382 | ||
1383 | ring_tx_db(q->adap, &q->q, ndesc); | |
1384 | spin_unlock(&q->sendq.lock); | |
1385 | ||
1386 | kfree_skb(skb); | |
1387 | return NET_XMIT_SUCCESS; | |
1388 | } | |
1389 | ||
1390 | /** | |
1391 | * restart_ctrlq - restart a suspended control queue | |
1392 | * @data: the control queue to restart | |
1393 | * | |
1394 | * Resumes transmission on a suspended Tx control queue. | |
1395 | */ | |
1396 | static void restart_ctrlq(unsigned long data) | |
1397 | { | |
1398 | struct sk_buff *skb; | |
1399 | unsigned int written = 0; | |
1400 | struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; | |
1401 | ||
1402 | spin_lock(&q->sendq.lock); | |
1403 | reclaim_completed_tx_imm(&q->q); | |
1404 | BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ | |
1405 | ||
1406 | while ((skb = __skb_dequeue(&q->sendq)) != NULL) { | |
1407 | struct fw_wr_hdr *wr; | |
1408 | unsigned int ndesc = skb->priority; /* previously saved */ | |
1409 | ||
1410 | /* | |
1411 | * Write descriptors and free skbs outside the lock to limit | |
1412 | * wait times. q->full is still set so new skbs will be queued. | |
1413 | */ | |
1414 | spin_unlock(&q->sendq.lock); | |
1415 | ||
1416 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; | |
1417 | inline_tx_skb(skb, &q->q, wr); | |
1418 | kfree_skb(skb); | |
1419 | ||
1420 | written += ndesc; | |
1421 | txq_advance(&q->q, ndesc); | |
1422 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { | |
1423 | unsigned long old = q->q.stops; | |
1424 | ||
1425 | ctrlq_check_stop(q, wr); | |
1426 | if (q->q.stops != old) { /* suspended anew */ | |
1427 | spin_lock(&q->sendq.lock); | |
1428 | goto ringdb; | |
1429 | } | |
1430 | } | |
1431 | if (written > 16) { | |
1432 | ring_tx_db(q->adap, &q->q, written); | |
1433 | written = 0; | |
1434 | } | |
1435 | spin_lock(&q->sendq.lock); | |
1436 | } | |
1437 | q->full = 0; | |
1438 | ringdb: if (written) | |
1439 | ring_tx_db(q->adap, &q->q, written); | |
1440 | spin_unlock(&q->sendq.lock); | |
1441 | } | |
1442 | ||
1443 | /** | |
1444 | * t4_mgmt_tx - send a management message | |
1445 | * @adap: the adapter | |
1446 | * @skb: the packet containing the management message | |
1447 | * | |
1448 | * Send a management message through control queue 0. | |
1449 | */ | |
1450 | int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) | |
1451 | { | |
1452 | int ret; | |
1453 | ||
1454 | local_bh_disable(); | |
1455 | ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); | |
1456 | local_bh_enable(); | |
1457 | return ret; | |
1458 | } | |
1459 | ||
1460 | /** | |
1461 | * is_ofld_imm - check whether a packet can be sent as immediate data | |
1462 | * @skb: the packet | |
1463 | * | |
1464 | * Returns true if a packet can be sent as an offload WR with immediate | |
1465 | * data. We currently use the same limit as for Ethernet packets. | |
1466 | */ | |
1467 | static inline int is_ofld_imm(const struct sk_buff *skb) | |
1468 | { | |
1469 | return skb->len <= MAX_IMM_TX_PKT_LEN; | |
1470 | } | |
1471 | ||
1472 | /** | |
1473 | * calc_tx_flits_ofld - calculate # of flits for an offload packet | |
1474 | * @skb: the packet | |
1475 | * | |
1476 | * Returns the number of flits needed for the given offload packet. | |
1477 | * These packets are already fully constructed and no additional headers | |
1478 | * will be added. | |
1479 | */ | |
1480 | static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) | |
1481 | { | |
1482 | unsigned int flits, cnt; | |
1483 | ||
1484 | if (is_ofld_imm(skb)) | |
1485 | return DIV_ROUND_UP(skb->len, 8); | |
1486 | ||
1487 | flits = skb_transport_offset(skb) / 8U; /* headers */ | |
1488 | cnt = skb_shinfo(skb)->nr_frags; | |
15dd16c2 | 1489 | if (skb_tail_pointer(skb) != skb_transport_header(skb)) |
fd3a4790 DM |
1490 | cnt++; |
1491 | return flits + sgl_len(cnt); | |
1492 | } | |
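/* Worked example, illustrative only: for an offload packet that is too
 * big for immediate data, with a fully built 48-byte header
 * (skb_transport_offset(skb) == 48), two page fragments and trailing
 * data in the linear area, the header contributes 48 / 8 = 6 flits and
 * the scatter/gather list covers 3 DMA addresses (assuming the sgl_len()
 * helper defined earlier in this file, that works out to 5 SGL flits,
 * for 11 flits in total).
 */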
1493 | ||
1494 | /** | |
1495 | * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion | |
1496 | * @adap: the adapter | |
1497 | * @q: the queue to stop | |
1498 | * | |
1499 | * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting | |
1500 | * inability to map packets. A periodic timer attempts to restart | |
1501 | * queues so marked. | |
1502 | */ | |
1503 | static void txq_stop_maperr(struct sge_ofld_txq *q) | |
1504 | { | |
1505 | q->mapping_err++; | |
1506 | q->q.stops++; | |
e46dab4d DM |
1507 | set_bit(q->q.cntxt_id - q->adap->sge.egr_start, |
1508 | q->adap->sge.txq_maperr); | |
fd3a4790 DM |
1509 | } |
1510 | ||
1511 | /** | |
1512 | * ofldtxq_stop - stop an offload Tx queue that has become full | |
1513 | * @q: the queue to stop | |
1514 | * @skb: the packet causing the queue to become full | |
1515 | * | |
1516 | * Stops an offload Tx queue that has become full and modifies the packet | |
1517 | * being written to request a wakeup. | |
1518 | */ | |
1519 | static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) | |
1520 | { | |
1521 | struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; | |
1522 | ||
e2ac9628 | 1523 | wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); |
fd3a4790 DM |
1524 | q->q.stops++; |
1525 | q->full = 1; | |
1526 | } | |
1527 | ||
1528 | /** | |
1529 | * service_ofldq - restart a suspended offload queue | |
1530 | * @q: the offload queue | |
1531 | * | |
1532 | * Services an offload Tx queue by moving packets from its packet queue | |
1533 | * to the HW Tx ring. The function starts and ends with the queue locked. | |
1534 | */ | |
1535 | static void service_ofldq(struct sge_ofld_txq *q) | |
1536 | { | |
1537 | u64 *pos; | |
1538 | int credits; | |
1539 | struct sk_buff *skb; | |
1540 | unsigned int written = 0; | |
1541 | unsigned int flits, ndesc; | |
1542 | ||
1543 | while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { | |
1544 | /* | |
1545 | * We drop the lock but leave skb on sendq, thus retaining | |
1546 | * exclusive access to the state of the queue. | |
1547 | */ | |
1548 | spin_unlock(&q->sendq.lock); | |
1549 | ||
1550 | reclaim_completed_tx(q->adap, &q->q, false); | |
1551 | ||
1552 | flits = skb->priority; /* previously saved */ | |
1553 | ndesc = flits_to_desc(flits); | |
1554 | credits = txq_avail(&q->q) - ndesc; | |
1555 | BUG_ON(credits < 0); | |
1556 | if (unlikely(credits < TXQ_STOP_THRES)) | |
1557 | ofldtxq_stop(q, skb); | |
1558 | ||
1559 | pos = (u64 *)&q->q.desc[q->q.pidx]; | |
1560 | if (is_ofld_imm(skb)) | |
1561 | inline_tx_skb(skb, &q->q, pos); | |
1562 | else if (map_skb(q->adap->pdev_dev, skb, | |
1563 | (dma_addr_t *)skb->head)) { | |
1564 | txq_stop_maperr(q); | |
1565 | spin_lock(&q->sendq.lock); | |
1566 | break; | |
1567 | } else { | |
1568 | int last_desc, hdr_len = skb_transport_offset(skb); | |
1569 | ||
1570 | memcpy(pos, skb->data, hdr_len); | |
1571 | write_sgl(skb, &q->q, (void *)pos + hdr_len, | |
1572 | pos + flits, hdr_len, | |
1573 | (dma_addr_t *)skb->head); | |
1574 | #ifdef CONFIG_NEED_DMA_MAP_STATE | |
1575 | skb->dev = q->adap->port[0]; | |
1576 | skb->destructor = deferred_unmap_destructor; | |
1577 | #endif | |
1578 | last_desc = q->q.pidx + ndesc - 1; | |
1579 | if (last_desc >= q->q.size) | |
1580 | last_desc -= q->q.size; | |
1581 | q->q.sdesc[last_desc].skb = skb; | |
1582 | } | |
1583 | ||
1584 | txq_advance(&q->q, ndesc); | |
1585 | written += ndesc; | |
1586 | if (unlikely(written > 32)) { | |
1587 | ring_tx_db(q->adap, &q->q, written); | |
1588 | written = 0; | |
1589 | } | |
1590 | ||
1591 | spin_lock(&q->sendq.lock); | |
1592 | __skb_unlink(skb, &q->sendq); | |
1593 | if (is_ofld_imm(skb)) | |
1594 | kfree_skb(skb); | |
1595 | } | |
1596 | if (likely(written)) | |
1597 | ring_tx_db(q->adap, &q->q, written); | |
1598 | } | |
1599 | ||
1600 | /** | |
1601 | * ofld_xmit - send a packet through an offload queue | |
1602 | * @q: the Tx offload queue | |
1603 | * @skb: the packet | |
1604 | * | |
1605 | * Send an offload packet through an SGE offload queue. | |
1606 | */ | |
1607 | static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) | |
1608 | { | |
1609 | skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ | |
1610 | spin_lock(&q->sendq.lock); | |
1611 | __skb_queue_tail(&q->sendq, skb); | |
1612 | if (q->sendq.qlen == 1) | |
1613 | service_ofldq(q); | |
1614 | spin_unlock(&q->sendq.lock); | |
1615 | return NET_XMIT_SUCCESS; | |
1616 | } | |
1617 | ||
1618 | /** | |
1619 | * restart_ofldq - restart a suspended offload queue | |
1620 | * @data: the offload queue to restart | |
1621 | * | |
1622 | * Resumes transmission on a suspended Tx offload queue. | |
1623 | */ | |
1624 | static void restart_ofldq(unsigned long data) | |
1625 | { | |
1626 | struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; | |
1627 | ||
1628 | spin_lock(&q->sendq.lock); | |
1629 | q->full = 0; /* the queue actually is completely empty now */ | |
1630 | service_ofldq(q); | |
1631 | spin_unlock(&q->sendq.lock); | |
1632 | } | |
1633 | ||
1634 | /** | |
1635 | * skb_txq - return the Tx queue an offload packet should use | |
1636 | * @skb: the packet | |
1637 | * | |
1638 | * Returns the Tx queue an offload packet should use as indicated by bits | |
1639 | * 1-15 in the packet's queue_mapping. | |
1640 | */ | |
1641 | static inline unsigned int skb_txq(const struct sk_buff *skb) | |
1642 | { | |
1643 | return skb->queue_mapping >> 1; | |
1644 | } | |
1645 | ||
1646 | /** | |
1647 | * is_ctrl_pkt - return whether an offload packet is a control packet | |
1648 | * @skb: the packet | |
1649 | * | |
1650 | * Returns whether an offload packet should use an OFLD or a CTRL | |
1651 | * Tx queue as indicated by bit 0 in the packet's queue_mapping. | |
1652 | */ | |
1653 | static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) | |
1654 | { | |
1655 | return skb->queue_mapping & 1; | |
1656 | } | |
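/* Illustrative only: skb_txq() and is_ctrl_pkt() decode an encoding the
 * sender is expected to produce, conceptually:
 *
 *	skb->queue_mapping = (txq_idx << 1) | is_ctrl;
 *
 * e.g. queue_mapping == 5 selects control queue 2, while
 * queue_mapping == 4 selects offload queue 2.  ULDs normally set this
 * via the set_wr_txq() helper in cxgb4.h, which packs the fields the
 * same way.
 */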
1657 | ||
1658 | static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) | |
1659 | { | |
1660 | unsigned int idx = skb_txq(skb); | |
1661 | ||
4fe44dd7 KS |
1662 | if (unlikely(is_ctrl_pkt(skb))) { |
1663 | /* Single ctrl queue is a requirement for LE workaround path */ | |
1664 | if (adap->tids.nsftids) | |
1665 | idx = 0; | |
fd3a4790 | 1666 | return ctrl_xmit(&adap->sge.ctrlq[idx], skb); |
4fe44dd7 | 1667 | } |
fd3a4790 DM |
1668 | return ofld_xmit(&adap->sge.ofldtxq[idx], skb); |
1669 | } | |
1670 | ||
1671 | /** | |
1672 | * t4_ofld_send - send an offload packet | |
1673 | * @adap: the adapter | |
1674 | * @skb: the packet | |
1675 | * | |
1676 | * Sends an offload packet. We use the packet queue_mapping to select the | |
1677 | * appropriate Tx queue as follows: bit 0 indicates whether the packet | |
1678 | * should be sent as regular or control, bits 1-15 select the queue. | |
1679 | */ | |
1680 | int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) | |
1681 | { | |
1682 | int ret; | |
1683 | ||
1684 | local_bh_disable(); | |
1685 | ret = ofld_send(adap, skb); | |
1686 | local_bh_enable(); | |
1687 | return ret; | |
1688 | } | |
1689 | ||
1690 | /** | |
1691 | * cxgb4_ofld_send - send an offload packet | |
1692 | * @dev: the net device | |
1693 | * @skb: the packet | |
1694 | * | |
1695 | * Sends an offload packet. This is an exported version of @t4_ofld_send, | |
1696 | * intended for ULDs. | |
1697 | */ | |
1698 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) | |
1699 | { | |
1700 | return t4_ofld_send(netdev2adap(dev), skb); | |
1701 | } | |
1702 | EXPORT_SYMBOL(cxgb4_ofld_send); | |
1703 | ||
e91b0f24 | 1704 | static inline void copy_frags(struct sk_buff *skb, |
fd3a4790 DM |
1705 | const struct pkt_gl *gl, unsigned int offset) |
1706 | { | |
e91b0f24 | 1707 | int i; |
fd3a4790 DM |
1708 | |
1709 | /* usually there's just one frag */ | |
e91b0f24 IC |
1710 | __skb_fill_page_desc(skb, 0, gl->frags[0].page, |
1711 | gl->frags[0].offset + offset, | |
1712 | gl->frags[0].size - offset); | |
1713 | skb_shinfo(skb)->nr_frags = gl->nfrags; | |
1714 | for (i = 1; i < gl->nfrags; i++) | |
1715 | __skb_fill_page_desc(skb, i, gl->frags[i].page, | |
1716 | gl->frags[i].offset, | |
1717 | gl->frags[i].size); | |
fd3a4790 DM |
1718 | |
1719 | /* get a reference to the last page, we don't own it */ | |
e91b0f24 | 1720 | get_page(gl->frags[gl->nfrags - 1].page); |
fd3a4790 DM |
1721 | } |
1722 | ||
1723 | /** | |
1724 | * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list | |
1725 | * @gl: the gather list | |
1726 | * @skb_len: size of sk_buff main body if it carries fragments | |
1727 | * @pull_len: amount of data to move to the sk_buff's main body | |
1728 | * | |
1729 | * Builds an sk_buff from the given packet gather list. Returns the | |
1730 | * sk_buff or %NULL if sk_buff allocation failed. | |
1731 | */ | |
1732 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, | |
1733 | unsigned int skb_len, unsigned int pull_len) | |
1734 | { | |
1735 | struct sk_buff *skb; | |
1736 | ||
1737 | /* | |
1738 | * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer | |
1739 | * size, which is expected since buffers are at least PAGE_SIZEd. | |
1740 | * In this case packets up to RX_COPY_THRES have only one fragment. | |
1741 | */ | |
1742 | if (gl->tot_len <= RX_COPY_THRES) { | |
1743 | skb = dev_alloc_skb(gl->tot_len); | |
1744 | if (unlikely(!skb)) | |
1745 | goto out; | |
1746 | __skb_put(skb, gl->tot_len); | |
1747 | skb_copy_to_linear_data(skb, gl->va, gl->tot_len); | |
1748 | } else { | |
1749 | skb = dev_alloc_skb(skb_len); | |
1750 | if (unlikely(!skb)) | |
1751 | goto out; | |
1752 | __skb_put(skb, pull_len); | |
1753 | skb_copy_to_linear_data(skb, gl->va, pull_len); | |
1754 | ||
e91b0f24 | 1755 | copy_frags(skb, gl, pull_len); |
fd3a4790 DM |
1756 | skb->len = gl->tot_len; |
1757 | skb->data_len = skb->len - pull_len; | |
1758 | skb->truesize += skb->data_len; | |
1759 | } | |
1760 | out: return skb; | |
1761 | } | |
1762 | EXPORT_SYMBOL(cxgb4_pktgl_to_skb); | |
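/* Illustrative note, not driver code: the two paths above differ only in
 * where the packet data ends up.  A gather list no bigger than
 * RX_COPY_THRES is copied wholesale into a small linear skb, leaving the
 * gather-list pages untouched; a bigger one gets pull_len bytes copied
 * into the linear area and the rest attached as page fragments by
 * copy_frags(), which takes an extra reference on the last page because
 * the free list still owns it.
 */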
1763 | ||
1764 | /** | |
1765 | * t4_pktgl_free - free a packet gather list | |
1766 | * @gl: the gather list | |
1767 | * | |
1768 | * Releases the pages of a packet gather list. We do not own the last | |
1769 | * page on the list and do not free it. | |
1770 | */ | |
de498c89 | 1771 | static void t4_pktgl_free(const struct pkt_gl *gl) |
fd3a4790 DM |
1772 | { |
1773 | int n; | |
e91b0f24 | 1774 | const struct page_frag *p; |
fd3a4790 DM |
1775 | |
1776 | for (p = gl->frags, n = gl->nfrags - 1; n--; p++) | |
1777 | put_page(p->page); | |
1778 | } | |
1779 | ||
1780 | /* | |
1781 | * Process an MPS trace packet. Give it an unused protocol number so it won't | |
1782 | * be delivered to anyone and send it to the stack for capture. | |
1783 | */ | |
1784 | static noinline int handle_trace_pkt(struct adapter *adap, | |
1785 | const struct pkt_gl *gl) | |
1786 | { | |
1787 | struct sk_buff *skb; | |
fd3a4790 DM |
1788 | |
1789 | skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); | |
1790 | if (unlikely(!skb)) { | |
1791 | t4_pktgl_free(gl); | |
1792 | return 0; | |
1793 | } | |
1794 | ||
d14807dd | 1795 | if (is_t4(adap->params.chip)) |
0a57a536 SR |
1796 | __skb_pull(skb, sizeof(struct cpl_trace_pkt)); |
1797 | else | |
1798 | __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); | |
1799 | ||
fd3a4790 DM |
1800 | skb_reset_mac_header(skb); |
1801 | skb->protocol = htons(0xffff); | |
1802 | skb->dev = adap->port[0]; | |
1803 | netif_receive_skb(skb); | |
1804 | return 0; | |
1805 | } | |
1806 | ||
1807 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, | |
1808 | const struct cpl_rx_pkt *pkt) | |
1809 | { | |
52367a76 VP |
1810 | struct adapter *adapter = rxq->rspq.adap; |
1811 | struct sge *s = &adapter->sge; | |
fd3a4790 DM |
1812 | int ret; |
1813 | struct sk_buff *skb; | |
1814 | ||
1815 | skb = napi_get_frags(&rxq->rspq.napi); | |
1816 | if (unlikely(!skb)) { | |
1817 | t4_pktgl_free(gl); | |
1818 | rxq->stats.rx_drops++; | |
1819 | return; | |
1820 | } | |
1821 | ||
52367a76 VP |
1822 | copy_frags(skb, gl, s->pktshift); |
1823 | skb->len = gl->tot_len - s->pktshift; | |
fd3a4790 DM |
1824 | skb->data_len = skb->len; |
1825 | skb->truesize += skb->data_len; | |
1826 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1827 | skb_record_rx_queue(skb, rxq->rspq.idx); | |
3a336cb1 | 1828 | skb_mark_napi_id(skb, &rxq->rspq.napi); |
87b6cf51 | 1829 | if (rxq->rspq.netdev->features & NETIF_F_RXHASH) |
8264989c TH |
1830 | skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, |
1831 | PKT_HASH_TYPE_L3); | |
fd3a4790 DM |
1832 | |
1833 | if (unlikely(pkt->vlan_ex)) { | |
86a9bad3 | 1834 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); |
fd3a4790 | 1835 | rxq->stats.vlan_ex++; |
fd3a4790 DM |
1836 | } |
1837 | ret = napi_gro_frags(&rxq->rspq.napi); | |
19ecae2c | 1838 | if (ret == GRO_HELD) |
fd3a4790 DM |
1839 | rxq->stats.lro_pkts++; |
1840 | else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) | |
1841 | rxq->stats.lro_merged++; | |
1842 | rxq->stats.pkts++; | |
1843 | rxq->stats.rx_cso++; | |
1844 | } | |
1845 | ||
1846 | /** | |
1847 | * t4_ethrx_handler - process an ingress ethernet packet | |
1848 | * @q: the response queue that received the packet | |
1849 | * @rsp: the response queue descriptor holding the RX_PKT message | |
1850 | * @si: the gather list of packet fragments | |
1851 | * | |
1852 | * Process an ingress ethernet packet and deliver it to the stack. | |
1853 | */ | |
1854 | int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, | |
1855 | const struct pkt_gl *si) | |
1856 | { | |
1857 | bool csum_ok; | |
1858 | struct sk_buff *skb; | |
fd3a4790 DM |
1859 | const struct cpl_rx_pkt *pkt; |
1860 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | |
52367a76 | 1861 | struct sge *s = &q->adap->sge; |
d14807dd | 1862 | int cpl_trace_pkt = is_t4(q->adap->params.chip) ? |
0a57a536 | 1863 | CPL_TRACE_PKT : CPL_TRACE_PKT_T5; |
84a200b3 VP |
1864 | #ifdef CONFIG_CHELSIO_T4_FCOE |
1865 | struct port_info *pi; | |
1866 | #endif | |
fd3a4790 | 1867 | |
0a57a536 | 1868 | if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) |
fd3a4790 DM |
1869 | return handle_trace_pkt(q->adap, si); |
1870 | ||
87b6cf51 | 1871 | pkt = (const struct cpl_rx_pkt *)rsp; |
cca2822d HS |
1872 | csum_ok = pkt->csum_calc && !pkt->err_vec && |
1873 | (q->netdev->features & NETIF_F_RXCSUM); | |
bdc590b9 | 1874 | if ((pkt->l2info & htonl(RXF_TCP_F)) && |
3a336cb1 | 1875 | !(cxgb_poll_busy_polling(q)) && |
fd3a4790 DM |
1876 | (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { |
1877 | do_gro(rxq, si, pkt); | |
1878 | return 0; | |
1879 | } | |
1880 | ||
1881 | skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); | |
1882 | if (unlikely(!skb)) { | |
1883 | t4_pktgl_free(si); | |
1884 | rxq->stats.rx_drops++; | |
1885 | return 0; | |
1886 | } | |
1887 | ||
52367a76 | 1888 | __skb_pull(skb, s->pktshift); /* remove ethernet header padding */ |
fd3a4790 DM |
1889 | skb->protocol = eth_type_trans(skb, q->netdev); |
1890 | skb_record_rx_queue(skb, q->idx); | |
87b6cf51 | 1891 | if (skb->dev->features & NETIF_F_RXHASH) |
8264989c TH |
1892 | skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, |
1893 | PKT_HASH_TYPE_L3); | |
87b6cf51 | 1894 | |
fd3a4790 DM |
1895 | rxq->stats.pkts++; |
1896 | ||
bdc590b9 | 1897 | if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { |
ba5d3c66 | 1898 | if (!pkt->ip_frag) { |
fd3a4790 | 1899 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
ba5d3c66 | 1900 | rxq->stats.rx_cso++; |
bdc590b9 | 1901 | } else if (pkt->l2info & htonl(RXF_IP_F)) { |
fd3a4790 DM |
1902 | __sum16 c = (__force __sum16)pkt->csum; |
1903 | skb->csum = csum_unfold(c); | |
1904 | skb->ip_summed = CHECKSUM_COMPLETE; | |
ba5d3c66 | 1905 | rxq->stats.rx_cso++; |
fd3a4790 | 1906 | } |
84a200b3 | 1907 | } else { |
bc8acf2c | 1908 | skb_checksum_none_assert(skb); |
84a200b3 VP |
1909 | #ifdef CONFIG_CHELSIO_T4_FCOE |
1910 | #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ | |
1911 | RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) | |
1912 | ||
1913 | pi = netdev_priv(skb->dev); | |
1914 | if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { | |
1915 | if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && | |
1916 | (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { | |
1917 | if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F))) | |
1918 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1919 | } | |
1920 | } | |
1921 | ||
1922 | #undef CPL_RX_PKT_FLAGS | |
1923 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
1924 | } | |
fd3a4790 DM |
1925 | |
1926 | if (unlikely(pkt->vlan_ex)) { | |
86a9bad3 | 1927 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); |
fd3a4790 | 1928 | rxq->stats.vlan_ex++; |
19ecae2c | 1929 | } |
3a336cb1 | 1930 | skb_mark_napi_id(skb, &q->napi); |
19ecae2c | 1931 | netif_receive_skb(skb); |
fd3a4790 DM |
1932 | return 0; |
1933 | } | |
1934 | ||
1935 | /** | |
1936 | * restore_rx_bufs - put back a packet's Rx buffers | |
1937 | * @si: the packet gather list | |
1938 | * @q: the SGE free list | |
1939 | * @frags: number of FL buffers to restore | |
1940 | * | |
1941 | * Puts back on an FL the Rx buffers associated with @si. The buffers | |
1942 | * have already been unmapped and are left unmapped; we mark them so to | |
1943 | * prevent further unmapping attempts. | |
1944 | * | |
1945 | * This function undoes a series of @unmap_rx_buf calls when we find out | |
1946 | * that the current packet can't be processed right away after all and we | |
1947 | * need to come back to it later. This is a very rare event and there's | |
1948 | * no effort to make this particularly efficient. | |
1949 | */ | |
1950 | static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, | |
1951 | int frags) | |
1952 | { | |
1953 | struct rx_sw_desc *d; | |
1954 | ||
1955 | while (frags--) { | |
1956 | if (q->cidx == 0) | |
1957 | q->cidx = q->size - 1; | |
1958 | else | |
1959 | q->cidx--; | |
1960 | d = &q->sdesc[q->cidx]; | |
1961 | d->page = si->frags[frags].page; | |
1962 | d->dma_addr |= RX_UNMAPPED_BUF; | |
1963 | q->avail++; | |
1964 | } | |
1965 | } | |
1966 | ||
1967 | /** | |
1968 | * is_new_response - check if a response is newly written | |
1969 | * @r: the response descriptor | |
1970 | * @q: the response queue | |
1971 | * | |
1972 | * Returns true if a response descriptor contains a yet unprocessed | |
1973 | * response. | |
1974 | */ | |
1975 | static inline bool is_new_response(const struct rsp_ctrl *r, | |
1976 | const struct sge_rspq *q) | |
1977 | { | |
1ecc7b7a | 1978 | return (r->type_gen >> RSPD_GEN_S) == q->gen; |
fd3a4790 DM |
1979 | } |
1980 | ||
1981 | /** | |
1982 | * rspq_next - advance to the next entry in a response queue | |
1983 | * @q: the queue | |
1984 | * | |
1985 | * Updates the state of a response queue to advance it to the next entry. | |
1986 | */ | |
1987 | static inline void rspq_next(struct sge_rspq *q) | |
1988 | { | |
1989 | q->cur_desc = (void *)q->cur_desc + q->iqe_len; | |
1990 | if (unlikely(++q->cidx == q->size)) { | |
1991 | q->cidx = 0; | |
1992 | q->gen ^= 1; | |
1993 | q->cur_desc = q->desc; | |
1994 | } | |
1995 | } | |
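/* Illustrative walk-through, not driver code: consider a 4-entry response
 * queue with q->gen starting at 1.  The hardware stamps each response it
 * writes with the current generation in type_gen, so is_new_response()
 * accepts only entries written since the consumer last passed this slot.
 * When rspq_next() wraps cidx from 3 back to 0 it flips q->gen to 0, and
 * stale entries from the previous lap (still stamped with generation 1)
 * are no longer mistaken for new work.
 */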
1996 | ||
1997 | /** | |
1998 | * process_responses - process responses from an SGE response queue | |
1999 | * @q: the ingress queue to process | |
2000 | * @budget: how many responses can be processed in this round | |
2001 | * | |
2002 | * Process responses from an SGE response queue up to the supplied budget. | |
2003 | * Responses include received packets as well as control messages from FW | |
2004 | * or HW. | |
2005 | * | |
2006 | * Additionally choose the interrupt holdoff time for the next interrupt | |
2007 | * on this queue. If the system is under memory shortage use a fairly | |
2008 | * long delay to help recovery. | |
2009 | */ | |
2010 | static int process_responses(struct sge_rspq *q, int budget) | |
2011 | { | |
2012 | int ret, rsp_type; | |
2013 | int budget_left = budget; | |
2014 | const struct rsp_ctrl *rc; | |
2015 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); | |
52367a76 VP |
2016 | struct adapter *adapter = q->adap; |
2017 | struct sge *s = &adapter->sge; | |
fd3a4790 DM |
2018 | |
2019 | while (likely(budget_left)) { | |
2020 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); | |
2021 | if (!is_new_response(rc, q)) | |
2022 | break; | |
2023 | ||
019be1cf | 2024 | dma_rmb(); |
1ecc7b7a HS |
2025 | rsp_type = RSPD_TYPE_G(rc->type_gen); |
2026 | if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { | |
e91b0f24 | 2027 | struct page_frag *fp; |
fd3a4790 DM |
2028 | struct pkt_gl si; |
2029 | const struct rx_sw_desc *rsd; | |
2030 | u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; | |
2031 | ||
1ecc7b7a | 2032 | if (len & RSPD_NEWBUF_F) { |
fd3a4790 DM |
2033 | if (likely(q->offset > 0)) { |
2034 | free_rx_bufs(q->adap, &rxq->fl, 1); | |
2035 | q->offset = 0; | |
2036 | } | |
1ecc7b7a | 2037 | len = RSPD_LEN_G(len); |
fd3a4790 DM |
2038 | } |
2039 | si.tot_len = len; | |
2040 | ||
2041 | /* gather packet fragments */ | |
2042 | for (frags = 0, fp = si.frags; ; frags++, fp++) { | |
2043 | rsd = &rxq->fl.sdesc[rxq->fl.cidx]; | |
52367a76 | 2044 | bufsz = get_buf_size(adapter, rsd); |
fd3a4790 | 2045 | fp->page = rsd->page; |
e91b0f24 IC |
2046 | fp->offset = q->offset; |
2047 | fp->size = min(bufsz, len); | |
2048 | len -= fp->size; | |
fd3a4790 DM |
2049 | if (!len) |
2050 | break; | |
2051 | unmap_rx_buf(q->adap, &rxq->fl); | |
2052 | } | |
2053 | ||
2054 | /* | |
2055 | * Last buffer remains mapped so explicitly make it | |
2056 | * coherent for CPU access. | |
2057 | */ | |
2058 | dma_sync_single_for_cpu(q->adap->pdev_dev, | |
2059 | get_buf_addr(rsd), | |
e91b0f24 | 2060 | fp->size, DMA_FROM_DEVICE); |
fd3a4790 DM |
2061 | |
2062 | si.va = page_address(si.frags[0].page) + | |
e91b0f24 | 2063 | si.frags[0].offset; |
fd3a4790 DM |
2064 | prefetch(si.va); |
2065 | ||
2066 | si.nfrags = frags + 1; | |
2067 | ret = q->handler(q, q->cur_desc, &si); | |
2068 | if (likely(ret == 0)) | |
52367a76 | 2069 | q->offset += ALIGN(fp->size, s->fl_align); |
fd3a4790 DM |
2070 | else |
2071 | restore_rx_bufs(&si, &rxq->fl, frags); | |
1ecc7b7a | 2072 | } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) { |
fd3a4790 DM |
2073 | ret = q->handler(q, q->cur_desc, NULL); |
2074 | } else { | |
2075 | ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); | |
2076 | } | |
2077 | ||
2078 | if (unlikely(ret)) { | |
2079 | /* couldn't process descriptor, back off for recovery */ | |
1ecc7b7a | 2080 | q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); |
fd3a4790 DM |
2081 | break; |
2082 | } | |
2083 | ||
2084 | rspq_next(q); | |
2085 | budget_left--; | |
2086 | } | |
2087 | ||
2088 | if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) | |
2089 | __refill_fl(q->adap, &rxq->fl); | |
2090 | return budget - budget_left; | |
2091 | } | |
2092 | ||
3a336cb1 HS |
2093 | #ifdef CONFIG_NET_RX_BUSY_POLL |
2094 | int cxgb_busy_poll(struct napi_struct *napi) | |
2095 | { | |
2096 | struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); | |
2097 | unsigned int params, work_done; | |
2098 | u32 val; | |
2099 | ||
2100 | if (!cxgb_poll_lock_poll(q)) | |
2101 | return LL_FLUSH_BUSY; | |
2102 | ||
2103 | work_done = process_responses(q, 4); | |
1ecc7b7a | 2104 | params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1); |
3a336cb1 HS |
2105 | q->next_intr_params = params; |
2106 | val = CIDXINC_V(work_done) | SEINTARM_V(params); | |
2107 | ||
2108 | /* If we don't have access to the new User GTS (T5+), use the old | |
2109 | * doorbell mechanism; otherwise use the new BAR2 mechanism. | |
2110 | */ | |
2111 | if (unlikely(!q->bar2_addr)) | |
2112 | t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), | |
2113 | val | INGRESSQID_V((u32)q->cntxt_id)); | |
2114 | else { | |
2115 | writel(val | INGRESSQID_V(q->bar2_qid), | |
2116 | q->bar2_addr + SGE_UDB_GTS); | |
2117 | wmb(); | |
2118 | } | |
2119 | ||
2120 | cxgb_poll_unlock_poll(q); | |
2121 | return work_done; | |
2122 | } | |
2123 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | |
2124 | ||
fd3a4790 DM |
2125 | /** |
2126 | * napi_rx_handler - the NAPI handler for Rx processing | |
2127 | * @napi: the napi instance | |
2128 | * @budget: how many packets we can process in this round | |
2129 | * | |
2130 | * Handler for new data events when using NAPI. This does not need any | |
2131 | * locking or protection from interrupts as data interrupts are off at | |
2132 | * this point and other adapter interrupts do not interfere (the latter | |
2133 | * is not a concern at all with MSI-X as non-data interrupts then have | |
2134 | * a separate handler). | |
2135 | */ | |
2136 | static int napi_rx_handler(struct napi_struct *napi, int budget) | |
2137 | { | |
2138 | unsigned int params; | |
2139 | struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); | |
3a336cb1 | 2140 | int work_done; |
d63a6dcf | 2141 | u32 val; |
fd3a4790 | 2142 | |
3a336cb1 HS |
2143 | if (!cxgb_poll_lock_napi(q)) |
2144 | return budget; | |
2145 | ||
2146 | work_done = process_responses(q, budget); | |
fd3a4790 | 2147 | if (likely(work_done < budget)) { |
e553ec3f HS |
2148 | int timer_index; |
2149 | ||
fd3a4790 | 2150 | napi_complete(napi); |
1ecc7b7a | 2151 | timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); |
e553ec3f HS |
2152 | |
2153 | if (q->adaptive_rx) { | |
2154 | if (work_done > max(timer_pkt_quota[timer_index], | |
2155 | MIN_NAPI_WORK)) | |
2156 | timer_index = (timer_index + 1); | |
2157 | else | |
2158 | timer_index = timer_index - 1; | |
2159 | ||
2160 | timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); | |
1ecc7b7a HS |
2161 | q->next_intr_params = |
2162 | QINTR_TIMER_IDX_V(timer_index) | | |
2163 | QINTR_CNT_EN_V(0); | |
e553ec3f HS |
2164 | params = q->next_intr_params; |
2165 | } else { | |
2166 | params = q->next_intr_params; | |
2167 | q->next_intr_params = q->intr_params; | |
2168 | } | |
fd3a4790 | 2169 | } else |
1ecc7b7a | 2170 | params = QINTR_TIMER_IDX_V(7); |
fd3a4790 | 2171 | |
f612b815 | 2172 | val = CIDXINC_V(work_done) | SEINTARM_V(params); |
df64e4d3 HS |
2173 | |
2174 | /* If we don't have access to the new User GTS (T5+), use the old | |
2175 | * doorbell mechanism; otherwise use the new BAR2 mechanism. | |
2176 | */ | |
2177 | if (unlikely(q->bar2_addr == NULL)) { | |
f612b815 HS |
2178 | t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), |
2179 | val | INGRESSQID_V((u32)q->cntxt_id)); | |
d63a6dcf | 2180 | } else { |
f612b815 | 2181 | writel(val | INGRESSQID_V(q->bar2_qid), |
df64e4d3 | 2182 | q->bar2_addr + SGE_UDB_GTS); |
d63a6dcf HS |
2183 | wmb(); |
2184 | } | |
3a336cb1 | 2185 | cxgb_poll_unlock_napi(q); |
fd3a4790 DM |
2186 | return work_done; |
2187 | } | |
2188 | ||
2189 | /* | |
2190 | * The MSI-X interrupt handler for an SGE response queue. | |
2191 | */ | |
2192 | irqreturn_t t4_sge_intr_msix(int irq, void *cookie) | |
2193 | { | |
2194 | struct sge_rspq *q = cookie; | |
2195 | ||
2196 | napi_schedule(&q->napi); | |
2197 | return IRQ_HANDLED; | |
2198 | } | |
2199 | ||
2200 | /* | |
2201 | * Process the indirect interrupt entries in the interrupt queue and kick off | |
2202 | * NAPI for each queue that has generated an entry. | |
2203 | */ | |
2204 | static unsigned int process_intrq(struct adapter *adap) | |
2205 | { | |
2206 | unsigned int credits; | |
2207 | const struct rsp_ctrl *rc; | |
2208 | struct sge_rspq *q = &adap->sge.intrq; | |
d63a6dcf | 2209 | u32 val; |
fd3a4790 DM |
2210 | |
2211 | spin_lock(&adap->sge.intrq_lock); | |
2212 | for (credits = 0; ; credits++) { | |
2213 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); | |
2214 | if (!is_new_response(rc, q)) | |
2215 | break; | |
2216 | ||
019be1cf | 2217 | dma_rmb(); |
1ecc7b7a | 2218 | if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { |
fd3a4790 DM |
2219 | unsigned int qid = ntohl(rc->pldbuflen_qid); |
2220 | ||
e46dab4d | 2221 | qid -= adap->sge.ingr_start; |
fd3a4790 DM |
2222 | napi_schedule(&adap->sge.ingr_map[qid]->napi); |
2223 | } | |
2224 | ||
2225 | rspq_next(q); | |
2226 | } | |
2227 | ||
f612b815 | 2228 | val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); |
df64e4d3 HS |
2229 | |
2230 | /* If we don't have access to the new User GTS (T5+), use the old | |
2231 | * doorbell mechanism; otherwise use the new BAR2 mechanism. | |
2232 | */ | |
2233 | if (unlikely(q->bar2_addr == NULL)) { | |
f612b815 HS |
2234 | t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), |
2235 | val | INGRESSQID_V(q->cntxt_id)); | |
d63a6dcf | 2236 | } else { |
f612b815 | 2237 | writel(val | INGRESSQID_V(q->bar2_qid), |
df64e4d3 | 2238 | q->bar2_addr + SGE_UDB_GTS); |
d63a6dcf HS |
2239 | wmb(); |
2240 | } | |
fd3a4790 DM |
2241 | spin_unlock(&adap->sge.intrq_lock); |
2242 | return credits; | |
2243 | } | |
2244 | ||
2245 | /* | |
2246 | * The MSI interrupt handler, which handles data events from SGE response queues | |
2247 | * as well as error and other async events as they all use the same MSI vector. | |
2248 | */ | |
2249 | static irqreturn_t t4_intr_msi(int irq, void *cookie) | |
2250 | { | |
2251 | struct adapter *adap = cookie; | |
2252 | ||
c3c7b121 HS |
2253 | if (adap->flags & MASTER_PF) |
2254 | t4_slow_intr_handler(adap); | |
fd3a4790 DM |
2255 | process_intrq(adap); |
2256 | return IRQ_HANDLED; | |
2257 | } | |
2258 | ||
2259 | /* | |
2260 | * Interrupt handler for legacy INTx interrupts. | |
2261 | * Handles data events from SGE response queues as well as error and other | |
2262 | * async events as they all use the same interrupt line. | |
2263 | */ | |
2264 | static irqreturn_t t4_intr_intx(int irq, void *cookie) | |
2265 | { | |
2266 | struct adapter *adap = cookie; | |
2267 | ||
f061de42 | 2268 | t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); |
c3c7b121 HS |
2269 | if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | |
2270 | process_intrq(adap)) | |
fd3a4790 DM |
2271 | return IRQ_HANDLED; |
2272 | return IRQ_NONE; /* probably shared interrupt */ | |
2273 | } | |
2274 | ||
2275 | /** | |
2276 | * t4_intr_handler - select the top-level interrupt handler | |
2277 | * @adap: the adapter | |
2278 | * | |
2279 | * Selects the top-level interrupt handler based on the type of interrupts | |
2280 | * (MSI-X, MSI, or INTx). | |
2281 | */ | |
2282 | irq_handler_t t4_intr_handler(struct adapter *adap) | |
2283 | { | |
2284 | if (adap->flags & USING_MSIX) | |
2285 | return t4_sge_intr_msix; | |
2286 | if (adap->flags & USING_MSI) | |
2287 | return t4_intr_msi; | |
2288 | return t4_intr_intx; | |
2289 | } | |
2290 | ||
2291 | static void sge_rx_timer_cb(unsigned long data) | |
2292 | { | |
2293 | unsigned long m; | |
a3bfb617 | 2294 | unsigned int i; |
fd3a4790 DM |
2295 | struct adapter *adap = (struct adapter *)data; |
2296 | struct sge *s = &adap->sge; | |
2297 | ||
4b8e27a8 | 2298 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
fd3a4790 DM |
2299 | for (m = s->starving_fl[i]; m; m &= m - 1) { |
2300 | struct sge_eth_rxq *rxq; | |
2301 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; | |
2302 | struct sge_fl *fl = s->egr_map[id]; | |
2303 | ||
2304 | clear_bit(id, s->starving_fl); | |
4e857c58 | 2305 | smp_mb__after_atomic(); |
fd3a4790 | 2306 | |
c098b026 | 2307 | if (fl_starving(adap, fl)) { |
fd3a4790 DM |
2308 | rxq = container_of(fl, struct sge_eth_rxq, fl); |
2309 | if (napi_reschedule(&rxq->rspq.napi)) | |
2310 | fl->starving++; | |
2311 | else | |
2312 | set_bit(id, s->starving_fl); | |
2313 | } | |
2314 | } | |
a3bfb617 HS |
2315 | /* The remainder of the SGE RX Timer Callback routine is dedicated to |
2316 | * global Master PF activities like checking for chip ingress stalls, | |
2317 | * etc. | |
2318 | */ | |
2319 | if (!(adap->flags & MASTER_PF)) | |
2320 | goto done; | |
fd3a4790 | 2321 | |
a3bfb617 | 2322 | t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); |
fd3a4790 | 2323 | |
a3bfb617 | 2324 | done: |
fd3a4790 DM |
2325 | mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); |
2326 | } | |
2327 | ||
2328 | static void sge_tx_timer_cb(unsigned long data) | |
2329 | { | |
2330 | unsigned long m; | |
2331 | unsigned int i, budget; | |
2332 | struct adapter *adap = (struct adapter *)data; | |
2333 | struct sge *s = &adap->sge; | |
2334 | ||
4b8e27a8 | 2335 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
fd3a4790 DM |
2336 | for (m = s->txq_maperr[i]; m; m &= m - 1) { |
2337 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; | |
2338 | struct sge_ofld_txq *txq = s->egr_map[id]; | |
2339 | ||
2340 | clear_bit(id, s->txq_maperr); | |
2341 | tasklet_schedule(&txq->qresume_tsk); | |
2342 | } | |
2343 | ||
2344 | budget = MAX_TIMER_TX_RECLAIM; | |
2345 | i = s->ethtxq_rover; | |
2346 | do { | |
2347 | struct sge_eth_txq *q = &s->ethtxq[i]; | |
2348 | ||
2349 | if (q->q.in_use && | |
2350 | time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && | |
2351 | __netif_tx_trylock(q->txq)) { | |
2352 | int avail = reclaimable(&q->q); | |
2353 | ||
2354 | if (avail) { | |
2355 | if (avail > budget) | |
2356 | avail = budget; | |
2357 | ||
2358 | free_tx_desc(adap, &q->q, avail, true); | |
2359 | q->q.in_use -= avail; | |
2360 | budget -= avail; | |
2361 | } | |
2362 | __netif_tx_unlock(q->txq); | |
2363 | } | |
2364 | ||
2365 | if (++i >= s->ethqsets) | |
2366 | i = 0; | |
2367 | } while (budget && i != s->ethtxq_rover); | |
2368 | s->ethtxq_rover = i; | |
2369 | mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); | |
2370 | } | |
2371 | ||
d63a6dcf | 2372 | /** |
df64e4d3 HS |
2373 | * bar2_address - return the BAR2 address for an SGE Queue's Registers |
2374 | * @adapter: the adapter | |
2375 | * @qid: the SGE Queue ID | |
2376 | * @qtype: the SGE Queue Type (Egress or Ingress) | |
2377 | * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues | |
d63a6dcf | 2378 | * |
df64e4d3 HS |
2379 | * Returns the BAR2 address for the SGE Queue Registers associated with |
2380 | * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also | |
2381 | * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE | |
2382 | * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" | |
2383 | * Registers are supported (e.g. the Write Combining Doorbell Buffer). | |
2384 | */ | |
2385 | static void __iomem *bar2_address(struct adapter *adapter, | |
2386 | unsigned int qid, | |
2387 | enum t4_bar2_qtype qtype, | |
2388 | unsigned int *pbar2_qid) | |
2389 | { | |
2390 | u64 bar2_qoffset; | |
2391 | int ret; | |
d63a6dcf | 2392 | |
dd0bcc0b | 2393 | ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, |
df64e4d3 HS |
2394 | &bar2_qoffset, pbar2_qid); |
2395 | if (ret) | |
2396 | return NULL; | |
d63a6dcf | 2397 | |
df64e4d3 | 2398 | return adapter->bar2 + bar2_qoffset; |
d63a6dcf HS |
2399 | } |
2400 | ||
145ef8a5 HS |
2401 | /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 |
2402 | * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map | |
2403 | */ | |
fd3a4790 DM |
2404 | int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, |
2405 | struct net_device *dev, int intr_idx, | |
145ef8a5 | 2406 | struct sge_fl *fl, rspq_handler_t hnd, int cong) |
fd3a4790 DM |
2407 | { |
2408 | int ret, flsz = 0; | |
2409 | struct fw_iq_cmd c; | |
52367a76 | 2410 | struct sge *s = &adap->sge; |
fd3a4790 DM |
2411 | struct port_info *pi = netdev_priv(dev); |
2412 | ||
2413 | /* Size needs to be a multiple of 16, including status entry. */ | |
2414 | iq->size = roundup(iq->size, 16); | |
2415 | ||
2416 | iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, | |
ad6bad3e | 2417 | &iq->phys_addr, NULL, 0, NUMA_NO_NODE); |
fd3a4790 DM |
2418 | if (!iq->desc) |
2419 | return -ENOMEM; | |
2420 | ||
2421 | memset(&c, 0, sizeof(c)); | |
e2ac9628 HS |
2422 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | |
2423 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | | |
6e4b51a6 HS |
2424 | FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0)); |
2425 | c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | | |
fd3a4790 | 2426 | FW_LEN16(c)); |
6e4b51a6 HS |
2427 | c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | |
2428 | FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | | |
1ecc7b7a HS |
2429 | FW_IQ_CMD_IQANDST_V(intr_idx < 0) | |
2430 | FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) | | |
6e4b51a6 | 2431 | FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : |
fd3a4790 | 2432 | -intr_idx - 1)); |
6e4b51a6 HS |
2433 | c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | |
2434 | FW_IQ_CMD_IQGTSMODE_F | | |
2435 | FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | | |
2436 | FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); | |
fd3a4790 DM |
2437 | c.iqsize = htons(iq->size); |
2438 | c.iqaddr = cpu_to_be64(iq->phys_addr); | |
145ef8a5 HS |
2439 | if (cong >= 0) |
2440 | c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F); | |
fd3a4790 DM |
2441 | |
2442 | if (fl) { | |
13432997 HS |
2443 | /* Allocate the ring for the hardware free list (with space |
2444 | * for its status page) along with the associated software | |
2445 | * descriptor ring. The free list size needs to be a multiple | |
2446 | * of the Egress Queue Unit and at least 2 Egress Units larger | |
2447 | * than the SGE's Egress Congestion Threshold | |
2448 | * (fl_starve_thres - 1). | |
2449 | */ | |
2450 | if (fl->size < s->fl_starve_thres - 1 + 2 * 8) | |
2451 | fl->size = s->fl_starve_thres - 1 + 2 * 8; | |
fd3a4790 DM |
2452 | fl->size = roundup(fl->size, 8); |
2453 | fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), | |
2454 | sizeof(struct rx_sw_desc), &fl->addr, | |
52367a76 | 2455 | &fl->sdesc, s->stat_len, NUMA_NO_NODE); |
fd3a4790 DM |
2456 | if (!fl->desc) |
2457 | goto fl_nomem; | |
2458 | ||
52367a76 | 2459 | flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); |
145ef8a5 HS |
2460 | c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | |
2461 | FW_IQ_CMD_FL0FETCHRO_F | | |
2462 | FW_IQ_CMD_FL0DATARO_F | | |
2463 | FW_IQ_CMD_FL0PADEN_F); | |
2464 | if (cong >= 0) | |
2465 | c.iqns_to_fl0congen |= | |
2466 | htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | | |
2467 | FW_IQ_CMD_FL0CONGCIF_F | | |
2468 | FW_IQ_CMD_FL0CONGEN_F); | |
1ecc7b7a HS |
2469 | c.fl0dcaen_to_fl0cidxfthresh = |
2470 | htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) | | |
2471 | FW_IQ_CMD_FL0FBMAX_V(FETCHBURSTMAX_512B_X)); | |
fd3a4790 DM |
2472 | c.fl0size = htons(flsz); |
2473 | c.fl0addr = cpu_to_be64(fl->addr); | |
2474 | } | |
2475 | ||
060e0c75 | 2476 | ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); |
fd3a4790 DM |
2477 | if (ret) |
2478 | goto err; | |
2479 | ||
2480 | netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); | |
3a336cb1 | 2481 | napi_hash_add(&iq->napi); |
fd3a4790 DM |
2482 | iq->cur_desc = iq->desc; |
2483 | iq->cidx = 0; | |
2484 | iq->gen = 1; | |
2485 | iq->next_intr_params = iq->intr_params; | |
2486 | iq->cntxt_id = ntohs(c.iqid); | |
2487 | iq->abs_id = ntohs(c.physiqid); | |
df64e4d3 HS |
2488 | iq->bar2_addr = bar2_address(adap, |
2489 | iq->cntxt_id, | |
2490 | T4_BAR2_QTYPE_INGRESS, | |
2491 | &iq->bar2_qid); | |
fd3a4790 | 2492 | iq->size--; /* subtract status entry */ |
fd3a4790 DM |
2493 | iq->netdev = dev; |
2494 | iq->handler = hnd; | |
2495 | ||
2496 | /* set offset to -1 to distinguish ingress queues without FL */ | |
2497 | iq->offset = fl ? 0 : -1; | |
2498 | ||
e46dab4d | 2499 | adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; |
fd3a4790 DM |
2500 | |
2501 | if (fl) { | |
62718b32 | 2502 | fl->cntxt_id = ntohs(c.fl0id); |
fd3a4790 DM |
2503 | fl->avail = fl->pend_cred = 0; |
2504 | fl->pidx = fl->cidx = 0; | |
2505 | fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; | |
e46dab4d | 2506 | adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; |
d63a6dcf | 2507 | |
df64e4d3 HS |
2508 | /* Note, we must initialize the BAR2 Free List User Doorbell |
2509 | * information before refilling the Free List! | |
d63a6dcf | 2510 | */ |
df64e4d3 HS |
2511 | fl->bar2_addr = bar2_address(adap, |
2512 | fl->cntxt_id, | |
2513 | T4_BAR2_QTYPE_EGRESS, | |
2514 | &fl->bar2_qid); | |
fd3a4790 DM |
2515 | refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); |
2516 | } | |
b8b1ae99 HS |
2517 | |
2518 | /* For T5 and later we attempt to set up the Congestion Manager values | |
2519 | * of the new RX Ethernet Queue. This should really be handled by | |
2520 | * firmware because it's more complex than any host driver wants to | |
2521 | * get involved with, it's different per chip, and this is almost | |
2522 | * certainly wrong. Firmware would be wrong as well, but it would be | |
2523 | * a lot easier to fix in one place ... For now we do something very | |
2524 | * simple (and hopefully less wrong). | |
2525 | */ | |
2526 | if (!is_t4(adap->params.chip) && cong >= 0) { | |
2527 | u32 param, val; | |
2528 | int i; | |
2529 | ||
2530 | param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | | |
2531 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | | |
2532 | FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); | |
2533 | if (cong == 0) { | |
2534 | val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X); | |
2535 | } else { | |
2536 | val = | |
2537 | CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); | |
2538 | for (i = 0; i < 4; i++) { | |
2539 | if (cong & (1 << i)) | |
2540 | val |= | |
2541 | CONMCTXT_CNGCHMAP_V(1 << (i << 2)); | |
2542 | } | |
2543 | } | |
2544 | ret = t4_set_params(adap, adap->mbox, adap->fn, 0, 1, | |
2545 | ¶m, &val); | |
2546 | if (ret) | |
2547 | dev_warn(adap->pdev_dev, "Failed to set Congestion" | |
2548 | " Manager Context for Ingress Queue %d: %d\n", | |
2549 | iq->cntxt_id, -ret); | |
2550 | } | |
2551 | ||
fd3a4790 DM |
2552 | return 0; |
2553 | ||
2554 | fl_nomem: | |
2555 | ret = -ENOMEM; | |
2556 | err: | |
2557 | if (iq->desc) { | |
2558 | dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, | |
2559 | iq->desc, iq->phys_addr); | |
2560 | iq->desc = NULL; | |
2561 | } | |
2562 | if (fl && fl->desc) { | |
2563 | kfree(fl->sdesc); | |
2564 | fl->sdesc = NULL; | |
2565 | dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), | |
2566 | fl->desc, fl->addr); | |
2567 | fl->desc = NULL; | |
2568 | } | |
2569 | return ret; | |
2570 | } | |
2571 | ||
2572 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) | |
2573 | { | |
22adfe0a | 2574 | q->cntxt_id = id; |
df64e4d3 HS |
2575 | q->bar2_addr = bar2_address(adap, |
2576 | q->cntxt_id, | |
2577 | T4_BAR2_QTYPE_EGRESS, | |
2578 | &q->bar2_qid); | |
fd3a4790 DM |
2579 | q->in_use = 0; |
2580 | q->cidx = q->pidx = 0; | |
2581 | q->stops = q->restarts = 0; | |
2582 | q->stat = (void *)&q->desc[q->size]; | |
3069ee9b | 2583 | spin_lock_init(&q->db_lock); |
e46dab4d | 2584 | adap->sge.egr_map[id - adap->sge.egr_start] = q; |
fd3a4790 DM |
2585 | } |
2586 | ||
2587 | int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, | |
2588 | struct net_device *dev, struct netdev_queue *netdevq, | |
2589 | unsigned int iqid) | |
2590 | { | |
2591 | int ret, nentries; | |
2592 | struct fw_eq_eth_cmd c; | |
52367a76 | 2593 | struct sge *s = &adap->sge; |
fd3a4790 DM |
2594 | struct port_info *pi = netdev_priv(dev); |
2595 | ||
2596 | /* Add status entries */ | |
52367a76 | 2597 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
fd3a4790 DM |
2598 | |
2599 | txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, | |
2600 | sizeof(struct tx_desc), sizeof(struct tx_sw_desc), | |
52367a76 | 2601 | &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, |
ad6bad3e | 2602 | netdev_queue_numa_node_read(netdevq)); |
fd3a4790 DM |
2603 | if (!txq->q.desc) |
2604 | return -ENOMEM; | |
2605 | ||
2606 | memset(&c, 0, sizeof(c)); | |
e2ac9628 HS |
2607 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | |
2608 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | | |
6e4b51a6 HS |
2609 | FW_EQ_ETH_CMD_PFN_V(adap->fn) | |
2610 | FW_EQ_ETH_CMD_VFN_V(0)); | |
2611 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | | |
2612 | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); | |
2613 | c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | | |
2614 | FW_EQ_ETH_CMD_VIID_V(pi->viid)); | |
1ecc7b7a HS |
2615 | c.fetchszm_to_iqid = |
2616 | htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | | |
2617 | FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | | |
2618 | FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); | |
2619 | c.dcaen_to_eqsize = | |
2620 | htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | | |
2621 | FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | | |
2622 | FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | | |
2623 | FW_EQ_ETH_CMD_EQSIZE_V(nentries)); | |
fd3a4790 DM |
2624 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); |
2625 | ||
060e0c75 | 2626 | ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); |
fd3a4790 DM |
2627 | if (ret) { |
2628 | kfree(txq->q.sdesc); | |
2629 | txq->q.sdesc = NULL; | |
2630 | dma_free_coherent(adap->pdev_dev, | |
2631 | nentries * sizeof(struct tx_desc), | |
2632 | txq->q.desc, txq->q.phys_addr); | |
2633 | txq->q.desc = NULL; | |
2634 | return ret; | |
2635 | } | |
2636 | ||
6e4b51a6 | 2637 | init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); |
fd3a4790 DM |
2638 | txq->txq = netdevq; |
2639 | txq->tso = txq->tx_cso = txq->vlan_ins = 0; | |
2640 | txq->mapping_err = 0; | |
2641 | return 0; | |
2642 | } | |
2643 | ||
2644 | int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, | |
2645 | struct net_device *dev, unsigned int iqid, | |
2646 | unsigned int cmplqid) | |
2647 | { | |
2648 | int ret, nentries; | |
2649 | struct fw_eq_ctrl_cmd c; | |
52367a76 | 2650 | struct sge *s = &adap->sge; |
fd3a4790 DM |
2651 | struct port_info *pi = netdev_priv(dev); |
2652 | ||
2653 | /* Add status entries */ | |
52367a76 | 2654 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
fd3a4790 DM |
2655 | |
2656 | txq->q.desc = alloc_ring(adap->pdev_dev, nentries, | |
2657 | sizeof(struct tx_desc), 0, &txq->q.phys_addr, | |
982b81eb | 2658 | NULL, 0, dev_to_node(adap->pdev_dev)); |
fd3a4790 DM |
2659 | if (!txq->q.desc) |
2660 | return -ENOMEM; | |
2661 | ||
e2ac9628 HS |
2662 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | |
2663 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | | |
6e4b51a6 HS |
2664 | FW_EQ_CTRL_CMD_PFN_V(adap->fn) | |
2665 | FW_EQ_CTRL_CMD_VFN_V(0)); | |
2666 | c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | | |
2667 | FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); | |
2668 | c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); | |
fd3a4790 | 2669 | c.physeqid_pkd = htonl(0); |
1ecc7b7a HS |
2670 | c.fetchszm_to_iqid = |
2671 | htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | | |
2672 | FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | | |
2673 | FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); | |
2674 | c.dcaen_to_eqsize = | |
2675 | htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | | |
2676 | FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | | |
2677 | FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | | |
2678 | FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); | |
fd3a4790 DM |
2679 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); |
2680 | ||
060e0c75 | 2681 | ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); |
fd3a4790 DM |
2682 | if (ret) { |
2683 | dma_free_coherent(adap->pdev_dev, | |
2684 | nentries * sizeof(struct tx_desc), | |
2685 | txq->q.desc, txq->q.phys_addr); | |
2686 | txq->q.desc = NULL; | |
2687 | return ret; | |
2688 | } | |
2689 | ||
6e4b51a6 | 2690 | init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); |
fd3a4790 DM |
2691 | txq->adap = adap; |
2692 | skb_queue_head_init(&txq->sendq); | |
2693 | tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); | |
2694 | txq->full = 0; | |
2695 | return 0; | |
2696 | } | |
2697 | ||
2698 | int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, | |
2699 | struct net_device *dev, unsigned int iqid) | |
2700 | { | |
2701 | int ret, nentries; | |
2702 | struct fw_eq_ofld_cmd c; | |
52367a76 | 2703 | struct sge *s = &adap->sge; |
fd3a4790 DM |
2704 | struct port_info *pi = netdev_priv(dev); |
2705 | ||
2706 | /* Add status entries */ | |
52367a76 | 2707 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
fd3a4790 DM |
2708 | |
2709 | txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, | |
2710 | sizeof(struct tx_desc), sizeof(struct tx_sw_desc), | |
52367a76 | 2711 | &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, |
ad6bad3e | 2712 | NUMA_NO_NODE); |
fd3a4790 DM |
2713 | if (!txq->q.desc) |
2714 | return -ENOMEM; | |
2715 | ||
2716 | memset(&c, 0, sizeof(c)); | |
e2ac9628 HS |
2717 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | |
2718 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | | |
6e4b51a6 HS |
2719 | FW_EQ_OFLD_CMD_PFN_V(adap->fn) | |
2720 | FW_EQ_OFLD_CMD_VFN_V(0)); | |
2721 | c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | | |
2722 | FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); | |
1ecc7b7a HS |
2723 | c.fetchszm_to_iqid = |
2724 | htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | | |
2725 | FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | | |
2726 | FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); | |
2727 | c.dcaen_to_eqsize = | |
2728 | htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | | |
2729 | FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | | |
2730 | FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | | |
2731 | FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); | |
fd3a4790 DM |
2732 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); |
2733 | ||
060e0c75 | 2734 | ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); |
fd3a4790 DM |
2735 | if (ret) { |
2736 | kfree(txq->q.sdesc); | |
2737 | txq->q.sdesc = NULL; | |
2738 | dma_free_coherent(adap->pdev_dev, | |
2739 | nentries * sizeof(struct tx_desc), | |
2740 | txq->q.desc, txq->q.phys_addr); | |
2741 | txq->q.desc = NULL; | |
2742 | return ret; | |
2743 | } | |
2744 | ||
6e4b51a6 | 2745 | init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); |
fd3a4790 DM |
2746 | txq->adap = adap; |
2747 | skb_queue_head_init(&txq->sendq); | |
2748 | tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); | |
2749 | txq->full = 0; | |
2750 | txq->mapping_err = 0; | |
2751 | return 0; | |
2752 | } | |
2753 | ||
2754 | static void free_txq(struct adapter *adap, struct sge_txq *q) | |
2755 | { | |
52367a76 VP |
2756 | struct sge *s = &adap->sge; |
2757 | ||
fd3a4790 | 2758 | dma_free_coherent(adap->pdev_dev, |
52367a76 | 2759 | q->size * sizeof(struct tx_desc) + s->stat_len, |
fd3a4790 DM |
2760 | q->desc, q->phys_addr); |
2761 | q->cntxt_id = 0; | |
2762 | q->sdesc = NULL; | |
2763 | q->desc = NULL; | |
2764 | } | |
2765 | ||
2766 | static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, | |
2767 | struct sge_fl *fl) | |
2768 | { | |
52367a76 | 2769 | struct sge *s = &adap->sge; |
fd3a4790 DM |
2770 | unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; |
2771 | ||
e46dab4d | 2772 | adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; |
060e0c75 DM |
2773 | t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, |
2774 | rq->cntxt_id, fl_id, 0xffff); | |
fd3a4790 DM |
2775 | dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, |
2776 | rq->desc, rq->phys_addr); | |
3a336cb1 | 2777 | napi_hash_del(&rq->napi); |
fd3a4790 DM |
2778 | netif_napi_del(&rq->napi); |
2779 | rq->netdev = NULL; | |
2780 | rq->cntxt_id = rq->abs_id = 0; | |
2781 | rq->desc = NULL; | |
2782 | ||
2783 | if (fl) { | |
2784 | free_rx_bufs(adap, fl, fl->avail); | |
52367a76 | 2785 | dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, |
fd3a4790 DM |
2786 | fl->desc, fl->addr); |
2787 | kfree(fl->sdesc); | |
2788 | fl->sdesc = NULL; | |
2789 | fl->cntxt_id = 0; | |
2790 | fl->desc = NULL; | |
2791 | } | |
2792 | } | |
2793 | ||
5fa76694 HS |
2794 | /** |
2795 | * t4_free_ofld_rxqs - free a block of consecutive Rx queues | |
2796 | * @adap: the adapter | |
2797 | * @n: number of queues | |
2798 | * @q: pointer to first queue | |
2799 | * | |
2800 | * Release the resources of a consecutive block of offload Rx queues. | |
2801 | */ | |
2802 | void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) | |
2803 | { | |
2804 | for ( ; n; n--, q++) | |
2805 | if (q->rspq.desc) | |
2806 | free_rspq_fl(adap, &q->rspq, | |
2807 | q->fl.size ? &q->fl : NULL); | |
2808 | } | |
2809 | ||
fd3a4790 DM |
2810 | /** |
2811 | * t4_free_sge_resources - free SGE resources | |
2812 | * @adap: the adapter | |
2813 | * | |
2814 | * Frees resources used by the SGE queue sets. | |
2815 | */ | |
2816 | void t4_free_sge_resources(struct adapter *adap) | |
2817 | { | |
2818 | int i; | |
2819 | struct sge_eth_rxq *eq = adap->sge.ethrxq; | |
2820 | struct sge_eth_txq *etq = adap->sge.ethtxq; | |
fd3a4790 DM |
2821 | |
2822 | /* clean up Ethernet Tx/Rx queues */ | |
2823 | for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { | |
2824 | if (eq->rspq.desc) | |
5fa76694 HS |
2825 | free_rspq_fl(adap, &eq->rspq, |
2826 | eq->fl.size ? &eq->fl : NULL); | |
fd3a4790 | 2827 | if (etq->q.desc) { |
060e0c75 DM |
2828 | t4_eth_eq_free(adap, adap->fn, adap->fn, 0, |
2829 | etq->q.cntxt_id); | |
fd3a4790 DM |
2830 | free_tx_desc(adap, &etq->q, etq->q.in_use, true); |
2831 | kfree(etq->q.sdesc); | |
2832 | free_txq(adap, &etq->q); | |
2833 | } | |
2834 | } | |
2835 | ||
2836 | /* clean up RDMA and iSCSI Rx queues */ | |
5fa76694 HS |
2837 | t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq); |
2838 | t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq); | |
2839 | t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq); | |
fd3a4790 DM |
2840 | |
2841 | /* clean up offload Tx queues */ | |
2842 | for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { | |
2843 | struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; | |
2844 | ||
2845 | if (q->q.desc) { | |
2846 | tasklet_kill(&q->qresume_tsk); | |
060e0c75 DM |
2847 | t4_ofld_eq_free(adap, adap->fn, adap->fn, 0, |
2848 | q->q.cntxt_id); | |
fd3a4790 DM |
2849 | free_tx_desc(adap, &q->q, q->q.in_use, false); |
2850 | kfree(q->q.sdesc); | |
2851 | __skb_queue_purge(&q->sendq); | |
2852 | free_txq(adap, &q->q); | |
2853 | } | |
2854 | } | |
2855 | ||
2856 | /* clean up control Tx queues */ | |
2857 | for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { | |
2858 | struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; | |
2859 | ||
2860 | if (cq->q.desc) { | |
2861 | tasklet_kill(&cq->qresume_tsk); | |
060e0c75 DM |
2862 | t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0, |
2863 | cq->q.cntxt_id); | |
fd3a4790 DM |
2864 | __skb_queue_purge(&cq->sendq); |
2865 | free_txq(adap, &cq->q); | |
2866 | } | |
2867 | } | |
2868 | ||
2869 | if (adap->sge.fw_evtq.desc) | |
2870 | free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); | |
2871 | ||
2872 | if (adap->sge.intrq.desc) | |
2873 | free_rspq_fl(adap, &adap->sge.intrq, NULL); | |
2874 | ||
2875 | /* clear the reverse egress queue map */ | |
4b8e27a8 HS |
2876 | memset(adap->sge.egr_map, 0, |
2877 | adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); | |
fd3a4790 DM |
2878 | } |
2879 | ||
2880 | void t4_sge_start(struct adapter *adap) | |
2881 | { | |
2882 | adap->sge.ethtxq_rover = 0; | |
2883 | mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); | |
2884 | mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); | |
2885 | } | |
2886 | ||
2887 | /** | |
2888 | * t4_sge_stop - disable SGE operation | |
2889 | * @adap: the adapter | |
2890 | * | |
2891 | * Stop tasklets and timers associated with the DMA engine. Note that | |
2892 | * this is effective only if measures have been taken to disable any HW | |
2893 | * events that may restart them. | |
2894 | */ | |
2895 | void t4_sge_stop(struct adapter *adap) | |
2896 | { | |
2897 | int i; | |
2898 | struct sge *s = &adap->sge; | |
2899 | ||
2900 | if (in_interrupt()) /* actions below require waiting */ | |
2901 | return; | |
2902 | ||
2903 | if (s->rx_timer.function) | |
2904 | del_timer_sync(&s->rx_timer); | |
2905 | if (s->tx_timer.function) | |
2906 | del_timer_sync(&s->tx_timer); | |
2907 | ||
2908 | for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { | |
2909 | struct sge_ofld_txq *q = &s->ofldtxq[i]; | |
2910 | ||
2911 | if (q->q.desc) | |
2912 | tasklet_kill(&q->qresume_tsk); | |
2913 | } | |
2914 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { | |
2915 | struct sge_ctrl_txq *cq = &s->ctrlq[i]; | |
2916 | ||
2917 | if (cq->q.desc) | |
2918 | tasklet_kill(&cq->qresume_tsk); | |
2919 | } | |
2920 | } | |
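/* A sketch of the expected ordering (an assumption about callers, not taken
 * from this file): quiesce the SGE before releasing its memory, i.e.
 *
 *	t4_sge_stop(adap);
 *	t4_free_sge_resources(adap);
 *
 * t4_sge_stop() itself frees nothing; it only stops the periodic Rx/Tx
 * timers and kills the Tx resume tasklets, and only when it is not called
 * from interrupt context.
 */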
2921 | ||
2922 | /** | |
06640310 | 2923 | * t4_sge_init_soft - grab core SGE values needed by SGE code |
fd3a4790 DM |
2924 | * @adap: the adapter |
2925 | * | |
06640310 HS |
2926 | * Grab the SGE operating parameters that we need in order to do our
2927 | * job and make sure we can live with them. | |
fd3a4790 | 2928 | */ |
52367a76 VP |
2929 | |
2930 | static int t4_sge_init_soft(struct adapter *adap) | |
fd3a4790 DM |
2931 | { |
2932 | struct sge *s = &adap->sge; | |
52367a76 VP |
2933 | u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; |
2934 | u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; | |
2935 | u32 ingress_rx_threshold; | |
fd3a4790 | 2936 | |
52367a76 VP |
2937 | /* |
2938 | * Verify that CPL messages are going to the Ingress Queue for | |
2939 | * process_responses() and that only packet data is going to the | |
2940 | * Free Lists. | |
2941 | */ | |
f612b815 HS |
2942 | if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != |
2943 | RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { | |
52367a76 VP |
2944 | dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); |
2945 | return -EINVAL; | |
2946 | } | |
2947 | ||
2948 | /* | |
2949 | * Validate the Host Buffer Register Array indices that we want to | |
2950 | * use ... | |
2951 | * | |
2952 | * XXX Note that we should really read through the Host Buffer Size | |
2953 | * XXX register array and find the indices of the Buffer Sizes which | |
2954 | * XXX meet our needs! | |
2955 | */ | |
2956 | #define READ_FL_BUF(x) \ | |
f612b815 | 2957 | t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) |
52367a76 VP |
2958 | |
2959 | fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); | |
2960 | fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); | |
2961 | fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); | |
2962 | fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); | |
2963 | ||
92ddcc7b KS |
2964 | /* We only bother using the Large Page logic if the Large Page Buffer |
2965 | * is larger than our Page Size Buffer. | |
2966 | */ | |
2967 | if (fl_large_pg <= fl_small_pg) | |
2968 | fl_large_pg = 0; | |
2969 | ||
52367a76 VP |
2970 | #undef READ_FL_BUF |
2971 | ||
92ddcc7b KS |
2972 | /* The Page Size Buffer must be exactly equal to our Page Size and the |
2973 | * Large Page Size Buffer should be 0 (per above) or a power of 2. | |
2974 | */ | |
52367a76 | 2975 | if (fl_small_pg != PAGE_SIZE || |
92ddcc7b | 2976 | (fl_large_pg & (fl_large_pg-1)) != 0) { |
52367a76 VP |
2977 | dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", |
2978 | fl_small_pg, fl_large_pg); | |
2979 | return -EINVAL; | |
2980 | } | |
2981 | if (fl_large_pg) | |
2982 | s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; | |
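/* Worked example (illustrative only): with 4KB pages and a 64KB Large
 * Page Buffer, fl_pg_order = ilog2(65536) - 12 = 16 - 12 = 4, i.e. large
 * Free List buffers are order-4 page allocations.
 */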
2983 | ||
2984 | if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || | |
2985 | fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { | |
2986 | dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", | |
2987 | fl_small_mtu, fl_large_mtu); | |
2988 | return -EINVAL; | |
2989 | } | |
2990 | ||
2991 | /* | |
2992 | * Retrieve our RX interrupt holdoff timer values and counter | |
2993 | * threshold values from the SGE parameters. | |
2994 | */ | |
f061de42 HS |
2995 | timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); |
2996 | timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); | |
2997 | timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); | |
52367a76 | 2998 | s->timer_val[0] = core_ticks_to_us(adap, |
f061de42 | 2999 | TIMERVALUE0_G(timer_value_0_and_1)); |
52367a76 | 3000 | s->timer_val[1] = core_ticks_to_us(adap, |
f061de42 | 3001 | TIMERVALUE1_G(timer_value_0_and_1)); |
52367a76 | 3002 | s->timer_val[2] = core_ticks_to_us(adap, |
f061de42 | 3003 | TIMERVALUE2_G(timer_value_2_and_3)); |
52367a76 | 3004 | s->timer_val[3] = core_ticks_to_us(adap, |
f061de42 | 3005 | TIMERVALUE3_G(timer_value_2_and_3)); |
52367a76 | 3006 | s->timer_val[4] = core_ticks_to_us(adap, |
f061de42 | 3007 | TIMERVALUE4_G(timer_value_4_and_5)); |
52367a76 | 3008 | s->timer_val[5] = core_ticks_to_us(adap, |
f061de42 | 3009 | TIMERVALUE5_G(timer_value_4_and_5)); |
52367a76 | 3010 | |
f612b815 HS |
3011 | ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); |
3012 | s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); | |
3013 | s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); | |
3014 | s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); | |
3015 | s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); | |
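/* At this point s->timer_val[] holds the six interrupt holdoff timers in
 * microseconds and s->counter_val[] the four packet-count thresholds.
 * Illustrative example (values are not defaults): with timer_val =
 * {1, 5, 10, 50, 100, 200} and counter_val = {1, 4, 8, 16}, a response
 * queue programmed with timer index 2 and counter index 1 would coalesce
 * interrupts to roughly one per 10us or per 4 entries, whichever is hit
 * first (the usual rx-usecs/rx-frames semantics; an assumption here, not
 * spelled out in this function).
 */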
52367a76 VP |
3016 | |
3017 | return 0; | |
3018 | } | |
3019 | ||
06640310 HS |
3020 | /** |
3021 | * t4_sge_init - initialize SGE | |
3022 | * @adap: the adapter | |
3023 | * | |
3024 | * Perform low-level SGE code initialization needed every time after a | |
3025 | * chip reset. | |
3026 | */ | |
52367a76 VP |
3027 | int t4_sge_init(struct adapter *adap) |
3028 | { | |
3029 | struct sge *s = &adap->sge; | |
ce8f407a HS |
3030 | u32 sge_control, sge_control2, sge_conm_ctrl; |
3031 | unsigned int ingpadboundary, ingpackboundary; | |
c2b955e0 | 3032 | int ret, egress_threshold; |
52367a76 VP |
3033 | |
3034 | /* | |
3035 | * Ingress Padding Boundary and Egress Status Page Size are set up by | |
3036 | * t4_fixup_host_params(). | |
3037 | */ | |
f612b815 HS |
3038 | sge_control = t4_read_reg(adap, SGE_CONTROL_A); |
3039 | s->pktshift = PKTSHIFT_G(sge_control); | |
3040 | s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; | |
ce8f407a HS |
3041 | |
3042 | /* T4 uses a single control field to specify both the PCIe Padding and | |
3043 | * Packing Boundary. T5 introduced the ability to specify these | |
3044 | * separately. The actual Ingress Packet Data alignment boundary | |
3045 | * within Packed Buffer Mode is the maximum of these two | |
1ecc7b7a HS |
3046 | * specifications. (Note that it makes no real practical sense to |
3047 | * have the Padding Boundary be larger than the Packing Boundary but you | |
3048 | * could set the chip up that way and, in fact, legacy T4 code would | |
3049 | * end up doing this because it would initialize the Padding Boundary and | |
3050 | * leave the Packing Boundary initialized to 0 (16 bytes).) | |
ce8f407a | 3051 | */ |
f612b815 HS |
3052 | ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + |
3053 | INGPADBOUNDARY_SHIFT_X); | |
ce8f407a HS |
3054 | if (is_t4(adap->params.chip)) { |
3055 | s->fl_align = ingpadboundary; | |
3056 | } else { | |
3057 | /* T5 has a different interpretation of one of the PCIe Packing | |
3058 | * Boundary values. | |
3059 | */ | |
3060 | sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); | |
3061 | ingpackboundary = INGPACKBOUNDARY_G(sge_control2); | |
3062 | if (ingpackboundary == INGPACKBOUNDARY_16B_X) | |
3063 | ingpackboundary = 16; | |
3064 | else | |
3065 | ingpackboundary = 1 << (ingpackboundary + | |
3066 | INGPACKBOUNDARY_SHIFT_X); | |
3067 | ||
3068 | s->fl_align = max(ingpadboundary, ingpackboundary); | |
3069 | } | |
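/* Worked example (illustrative only): on T4, fl_align is simply the
 * decoded Ingress Padding Boundary.  On T5, with e.g. a decoded Padding
 * Boundary of 64 bytes and a decoded Packing Boundary of 512 bytes,
 * fl_align = max(64, 512) = 512 bytes.
 */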
52367a76 | 3070 | |
06640310 | 3071 | ret = t4_sge_init_soft(adap); |
52367a76 VP |
3072 | if (ret < 0) |
3073 | return ret; | |
3074 | ||
3075 | /* | |
3076 | * A FL with <= fl_starve_thres buffers is starving and a periodic | |
3077 | * timer will attempt to refill it. This needs to be larger than the | |
3078 | * SGE's Egress Congestion Threshold. If it isn't, then we can get | |
3079 | * stuck waiting for new packets while the SGE is waiting for us to | |
3080 | * give it more Free List entries. (Note that the SGE's Egress | |
c2b955e0 KS |
3081 | * Congestion Threshold is in units of 2 Free List pointers.) For T4, |
3082 | * there was only a single field to control this. For T5 there's the | |
3083 | * original field which now only applies to Unpacked Mode Free List | |
3084 | * buffers and a new field which only applies to Packed Mode Free List | |
3085 | * buffers. | |
52367a76 | 3086 | */ |
f612b815 | 3087 | sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); |
c2b955e0 | 3088 | if (is_t4(adap->params.chip)) |
f612b815 | 3089 | egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); |
c2b955e0 | 3090 | else |
f612b815 | 3091 | egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); |
c2b955e0 | 3092 | s->fl_starve_thres = 2*egress_threshold + 1; |
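/* Worked example (illustrative only): if the congestion threshold field
 * reads back as 64 (i.e. 128 Free List pointers, since the field is in
 * units of 2), then fl_starve_thres = 2*64 + 1 = 129 buffers, just above
 * the point at which the SGE considers the Free List congested.
 */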
52367a76 | 3093 | |
a3bfb617 HS |
3094 | t4_idma_monitor_init(adap, &s->idma_monitor); |
3095 | ||
1ecc7b7a HS |
3096 | /* Set up timers used for recurring callbacks to process RX and TX
3097 | * administrative tasks. | |
3098 | */ | |
fd3a4790 DM |
3099 | setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); |
3100 | setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); | |
a3bfb617 | 3101 | |
fd3a4790 | 3102 | spin_lock_init(&s->intrq_lock); |
52367a76 VP |
3103 | |
3104 | return 0; | |
fd3a4790 | 3105 | } |
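/* A minimal usage sketch (an assumption about the calling code, not part of
 * this file): t4_sge_init() runs once per chip reset, before any SGE queues
 * are allocated; t4_sge_start()/t4_sge_stop() bracket normal operation; and
 * t4_free_sge_resources() releases everything on teardown, e.g.
 *
 *	if (t4_sge_init(adap))
 *		goto err;
 *	...allocate Rx/Tx queues...
 *	t4_sge_start(adap);
 *	...
 *	t4_sge_stop(adap);
 *	t4_free_sge_resources(adap);
 */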