/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.84-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* take a breather then clean up driver data */
	msleep(100);

	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};

/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer_info =
			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)tx_buffer_info->dma,
			tx_buffer_info->length,
			tx_buffer_info->next_to_watch,
			(u64)tx_buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)tx_buffer_info->dma,
				tx_buffer_info->length,
				tx_buffer_info->next_to_watch,
				(u64)tx_buffer_info->time_stamp,
				tx_buffer_info->skb);
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
			    tx_buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS, 16, 1,
					phys_to_virt(tx_buffer_info->dma),
					tx_buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum   Ident  |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   phys_to_virt(rx_buffer_info->dma),
					   rx_ring->rx_buf_len, true);

					if (rx_ring->rx_buf_len
						< IXGBE_RXBUFFER_2048)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS, 16, 1,
						  phys_to_virt(
						    rx_buffer_info->page_dma +
						    rx_buffer_info->page_offset
						  ),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
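		/*
		 * Each 32-bit IVAR register holds four 8-bit entries; Rx
		 * causes occupy 0-63 and Tx causes 64-127, so (cause >> 2)
		 * below selects the register and (queue & 0x3) the byte
		 * within it.
		 */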
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
				      struct ixgbe_tx_buffer
				      *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_xon_state - check the tx ring xon state
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
 * corresponding TC of this tx_ring when checking TFCS.
 *
 * Returns : true if in xon state (currently not paused)
 */
static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
				      struct ixgbe_ring *tx_ring)
{
	u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->dcb_cfg.pfc_mode_enable) {
		int tc;
		int reg_idx = tx_ring->reg_idx;
		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

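		/*
		 * Map the ring's register index back to its traffic class;
		 * e.g. with dcb_i == 8 on 82599, reg_idx 0-31 fall in TC0,
		 * 32-63 in TC1, 64-79/80-95 in TC2/TC3 and each block of 8
		 * from 96-127 in TC4-TC7, which is what the math below does.
		 */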
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			tc = reg_idx >> 2;
			txoff = IXGBE_TFCS_TXOFF0;
			break;
		case ixgbe_mac_82599EB:
			tc = 0;
			txoff = IXGBE_TFCS_TXOFF;
			if (dcb_i == 8) {
				/* TC0, TC1 */
				tc = reg_idx >> 5;
				if (tc == 2) /* TC2, TC3 */
					tc += (reg_idx - 64) >> 4;
				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
					tc += 1 + ((reg_idx - 96) >> 3);
			} else if (dcb_i == 4) {
				/* TC0, TC1 */
				tc = reg_idx >> 6;
				if (tc == 1) {
					tc += (reg_idx - 64) >> 5;
					if (tc == 2) /* TC2, TC3 */
						tc += (reg_idx - 96) >> 4;
				}
			}
			break;
		default:
			tc = 0;
		}
		txoff <<= tc;
	}
#endif
	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    ixgbe_tx_xon_state(adapter, tx_ring)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
		e_err(drv, "Detected Tx Unit Hang\n"
		      "  Tx Queue             <%d>\n"
		      "  TDH, TDT             <%x>, <%x>\n"
		      "  next_to_use          <%x>\n"
		      "  next_to_clean        <%x>\n"
		      "tx_buffer_info[next_to_clean]\n"
		      "  time_stamp           <%lx>\n"
		      "  jiffies              <%lx>\n",
		      tx_ring->queue_index,
		      IXGBE_READ_REG(hw, tx_ring->head),
		      IXGBE_READ_REG(hw, tx_ring->tail),
		      tx_ring->next_to_use, eop,
		      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
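/*
 * Illustrative worst case (assuming 4 KiB pages, i.e. MAX_SKB_FRAGS == 18):
 * TXD_USE_COUNT(16384) == 1 and TXD_USE_COUNT(PAGE_SIZE) == 1, so
 * DESC_NEEDED = 1 + 18 * 1 + 1 = 20 descriptors for one worst-case skb,
 * including its context descriptor.
 */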

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;
				unsigned int hlen = skb_headlen(skb);

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
				/* adjust for FCoE Sequence Offload */
				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
				    && (skb->protocol == htons(ETH_P_FCOE)) &&
				    skb_is_gso(skb)) {
					hlen = skb_transport_offset(skb) +
						sizeof(struct fc_frame_header) +
						sizeof(struct fcoe_crc_eof);
					segs = DIV_ROUND_UP(skb->len - hlen,
						skb_shinfo(skb)->gso_size);
				}
#endif /* IXGBE_FCOE */
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * hlen) + skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
							 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++tx_ring->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			e_info(probe, "tx hang %d detected, resetting "
			       "adapter\n", adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&tx_ring->syncp);
	return count < tx_ring->work_limit;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring->reg_idx;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
				   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring->reg_idx;
	struct ixgbe_hw *hw = &adapter->hw;

	if (tx_ring->cpu != cpu) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
				   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
		}
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i]->cpu = -1;
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->cpu = -1;
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && (tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
					 struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);

		if (!bi->page_dma &&
		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(netdev);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    DMA_FROM_DEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
									bufsz);
			bi->skb = skb;

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev,
						 bi->skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
		IXGBE_RXDADV_RSCCNT_MASK) >>
		IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 * @count: pointer to number of packets coalesced in this context
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
							u64 *count)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		*count += 1;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

struct ixgbe_rsc_cb {
	dma_addr_t dma;
	bool delay_unmap;
};

#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
			if ((len > IXGBE_RX_HDR_SIZE) ||
			    (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
				len = IXGBE_RX_HDR_SIZE;
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
			    (!(skb->prev))) {
				/*
				 * When HWRSC is enabled, delay unmapping
				 * of the first packet. It carries the
				 * header information, HW may still
				 * access the header after the writeback.
				 * Only unmap it when EOP is reached
				 */
				IXGBE_RSC_CB(skb)->delay_unmap = true;
				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
			} else {
				dma_unmap_single(&pdev->dev,
						 rx_buffer_info->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			}
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
				     IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb,
						&(rx_ring->rsc_count));
			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
				if (IXGBE_RSC_CB(skb)->delay_unmap) {
					dma_unmap_single(&pdev->dev,
							 IXGBE_RSC_CB(skb)->dma,
							 rx_ring->rx_buf_len,
							 DMA_FROM_DEVICE);
					IXGBE_RSC_CB(skb)->dma = 0;
					IXGBE_RSC_CB(skb)->delay_unmap = false;
				}
				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
					rx_ring->rsc_count +=
						skb_shinfo(skb)->nr_frags;
				else
					rx_ring->rsc_count++;
				rx_ring->rsc_flush++;
			}
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			rx_ring->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;

	return cleaned;
}

021230d4 | 1381 | static int ixgbe_clean_rxonly(struct napi_struct *, int); |
9a799d71 AK |
1382 | /** |
1383 | * ixgbe_configure_msix - Configure MSI-X hardware | |
1384 | * @adapter: board private structure | |
1385 | * | |
1386 | * ixgbe_configure_msix sets up the hardware to properly generate MSI-X | |
1387 | * interrupts. | |
1388 | **/ | |
1389 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |
1390 | { | |
021230d4 AV |
1391 | struct ixgbe_q_vector *q_vector; |
1392 | int i, j, q_vectors, v_idx, r_idx; | |
1393 | u32 mask; | |
9a799d71 | 1394 | |
021230d4 | 1395 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
9a799d71 | 1396 | |
4df10466 JB |
1397 | /* |
1398 | * Populate the IVAR table and set the ITR values to the | |
021230d4 AV |
1399 | * corresponding register. |
1400 | */ | |
1401 | for (v_idx = 0; v_idx < q_vectors; v_idx++) { | |
7a921c93 | 1402 | q_vector = adapter->q_vector[v_idx]; |
984b3f57 | 1403 | /* XXX for_each_set_bit(...) */ |
021230d4 | 1404 | r_idx = find_first_bit(q_vector->rxr_idx, |
e8e9f696 | 1405 | adapter->num_rx_queues); |
021230d4 AV |
1406 | |
1407 | for (i = 0; i < q_vector->rxr_count; i++) { | |
4a0b9ca0 | 1408 | j = adapter->rx_ring[r_idx]->reg_idx; |
e8e26350 | 1409 | ixgbe_set_ivar(adapter, 0, j, v_idx); |
021230d4 | 1410 | r_idx = find_next_bit(q_vector->rxr_idx, |
e8e9f696 JP |
1411 | adapter->num_rx_queues, |
1412 | r_idx + 1); | |
021230d4 AV |
1413 | } |
1414 | r_idx = find_first_bit(q_vector->txr_idx, | |
e8e9f696 | 1415 | adapter->num_tx_queues); |
021230d4 AV |
1416 | |
1417 | for (i = 0; i < q_vector->txr_count; i++) { | |
4a0b9ca0 | 1418 | j = adapter->tx_ring[r_idx]->reg_idx; |
e8e26350 | 1419 | ixgbe_set_ivar(adapter, 1, j, v_idx); |
021230d4 | 1420 | r_idx = find_next_bit(q_vector->txr_idx, |
e8e9f696 JP |
1421 | adapter->num_tx_queues, |
1422 | r_idx + 1); | |
021230d4 AV |
1423 | } |
1424 | ||
021230d4 | 1425 | if (q_vector->txr_count && !q_vector->rxr_count) |
f7554a2b NS |
1426 | /* tx only */ |
1427 | q_vector->eitr = adapter->tx_eitr_param; | |
509ee935 | 1428 | else if (q_vector->rxr_count) |
f7554a2b NS |
1429 | /* rx or mixed */ |
1430 | q_vector->eitr = adapter->rx_eitr_param; | |
021230d4 | 1431 | |
fe49f04a | 1432 | ixgbe_write_eitr(q_vector); |
b25ebfd2 PW |
1433 | /* If Flow Director is enabled, set interrupt affinity */ |
1434 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | |
1435 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { | |
1436 | /* | |
1437 | * Allocate the affinity_hint cpumask, assign the mask | |
1438 | * for this vector, and set our affinity_hint for | |
1439 | * this irq. | |
1440 | */ | |
1441 | if (!alloc_cpumask_var(&q_vector->affinity_mask, | |
1442 | GFP_KERNEL)) | |
1443 | return; | |
1444 | cpumask_set_cpu(v_idx, q_vector->affinity_mask); | |
1445 | irq_set_affinity_hint(adapter->msix_entries[v_idx].vector, | |
1446 | q_vector->affinity_mask); | |
1447 | } | |
9a799d71 AK |
1448 | } |
1449 | ||
e8e26350 PW |
1450 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
1451 | ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, | |
e8e9f696 | 1452 | v_idx); |
e8e26350 PW |
1453 | else if (adapter->hw.mac.type == ixgbe_mac_82599EB) |
1454 | ixgbe_set_ivar(adapter, -1, 1, v_idx); | |
021230d4 AV |
1455 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); |
1456 | ||
41fb9248 | 1457 | /* set up to autoclear the timer and the vectors */ |
021230d4 | 1458 | mask = IXGBE_EIMS_ENABLE_MASK; |
1cdd1ec8 GR |
1459 | if (adapter->num_vfs) |
1460 | mask &= ~(IXGBE_EIMS_OTHER | | |
1461 | IXGBE_EIMS_MAILBOX | | |
1462 | IXGBE_EIMS_LSC); | |
1463 | else | |
1464 | mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); | |
021230d4 | 1465 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); |
9a799d71 AK |
1466 | } |
1467 | ||
f494e8fa AV |
1468 | enum latency_range { |
1469 | lowest_latency = 0, | |
1470 | low_latency = 1, | |
1471 | bulk_latency = 2, | |
1472 | latency_invalid = 255 | |
1473 | }; | |
1474 | ||
1475 | /** | |
1476 | * ixgbe_update_itr - update the dynamic ITR value based on statistics | |
1477 | * @adapter: pointer to adapter | |
1478 | * @eitr: eitr setting (ints per sec) used during the last timeslice |
1479 | * @itr_setting: current latency class (see enum latency_range) |
1480 | * @packets: the number of packets during this measurement interval | |
1481 | * @bytes: the number of bytes during this measurement interval | |
1482 | * | |
1483 | * Stores a new ITR value based on packets and byte | |
1484 | * counts during the last interrupt. The advantage of per interrupt | |
1485 | * computation is faster updates and more accurate ITR for the current | |
1486 | * traffic pattern. Constants in this function were computed | |
1487 | * based on theoretical maximum wire speed and thresholds were set based | |
1488 | * on testing data as well as attempting to minimize response time | |
1489 | * while increasing bulk throughput. | |
1490 | * This functionality is controlled by the InterruptThrottleRate module |
1491 | * parameter (see ixgbe_param.c) | |
1492 | **/ | |
1493 | static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, | |
e8e9f696 JP |
1494 | u32 eitr, u8 itr_setting, |
1495 | int packets, int bytes) | |
f494e8fa AV |
1496 | { |
1497 | unsigned int retval = itr_setting; | |
1498 | u32 timepassed_us; | |
1499 | u64 bytes_perint; | |
1500 | ||
1501 | if (packets == 0) | |
1502 | goto update_itr_done; | |
1503 | ||
1504 | ||
1505 | /* simple throttle rate management |
1506 | * 0-20MB/s lowest (100000 ints/s) | |
1507 | * 20-100MB/s low (20000 ints/s) | |
1508 | * 100-1249MB/s bulk (8000 ints/s) | |
1509 | */ | |
1510 | /* what was last interrupt timeslice? */ | |
1511 | timepassed_us = 1000000/eitr; | |
1512 | bytes_perint = bytes / timepassed_us; /* bytes/usec */ | |
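| /* bytes_perint is in bytes per microsecond, which is roughly MB/s, |
| * the same unit used in the table above */ |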
1513 | ||
1514 | switch (itr_setting) { | |
1515 | case lowest_latency: | |
1516 | if (bytes_perint > adapter->eitr_low) | |
1517 | retval = low_latency; | |
1518 | break; | |
1519 | case low_latency: | |
1520 | if (bytes_perint > adapter->eitr_high) | |
1521 | retval = bulk_latency; | |
1522 | else if (bytes_perint <= adapter->eitr_low) | |
1523 | retval = lowest_latency; | |
1524 | break; | |
1525 | case bulk_latency: | |
1526 | if (bytes_perint <= adapter->eitr_high) | |
1527 | retval = low_latency; | |
1528 | break; | |
1529 | } | |
1530 | ||
1531 | update_itr_done: | |
1532 | return retval; | |
1533 | } | |
1534 | ||
509ee935 JB |
1535 | /** |
1536 | * ixgbe_write_eitr - write EITR register in hardware specific way | |
fe49f04a | 1537 | * @q_vector: structure containing interrupt and ring information |
509ee935 JB |
1538 | * |
1539 | * This function is made to be called by ethtool and by the driver | |
1540 | * when it needs to update EITR registers at runtime. Hardware | |
1541 | * specific quirks/differences are taken care of here. | |
1542 | */ | |
fe49f04a | 1543 | void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) |
509ee935 | 1544 | { |
fe49f04a | 1545 | struct ixgbe_adapter *adapter = q_vector->adapter; |
509ee935 | 1546 | struct ixgbe_hw *hw = &adapter->hw; |
fe49f04a AD |
1547 | int v_idx = q_vector->v_idx; |
1548 | u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); | |
1549 | ||
509ee935 JB |
1550 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
1551 | /* must write high and low 16 bits to reset counter */ | |
1552 | itr_reg |= (itr_reg << 16); | |
1553 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | |
f8d1dcaf JB |
1554 | /* |
1555 | * 82599 can support a value of zero, so allow it for | |
1556 | * max interrupt rate, but there is an erratum where it cannot |
1557 | * be zero with RSC |
1558 | */ | |
1559 | if (itr_reg == 8 && | |
1560 | !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) | |
1561 | itr_reg = 0; | |
1562 | ||
509ee935 JB |
1563 | /* |
1564 | * set the WDIS bit so this write does not clear the timer bits and |
1565 | * cause an immediate assertion of the interrupt |
1566 | */ | |
1567 | itr_reg |= IXGBE_EITR_CNT_WDIS; | |
1568 | } | |
1569 | IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); | |
1570 | } | |
1571 | ||
f494e8fa AV |
1572 | static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) |
1573 | { | |
1574 | struct ixgbe_adapter *adapter = q_vector->adapter; | |
f494e8fa AV |
1575 | u32 new_itr; |
1576 | u8 current_itr, ret_itr; | |
fe49f04a | 1577 | int i, r_idx; |
f494e8fa AV |
1578 | struct ixgbe_ring *rx_ring, *tx_ring; |
1579 | ||
1580 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | |
1581 | for (i = 0; i < q_vector->txr_count; i++) { | |
4a0b9ca0 | 1582 | tx_ring = adapter->tx_ring[r_idx]; |
f494e8fa | 1583 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
e8e9f696 JP |
1584 | q_vector->tx_itr, |
1585 | tx_ring->total_packets, | |
1586 | tx_ring->total_bytes); | |
f494e8fa AV |
1587 | /* if the result for this queue would decrease interrupt |
1588 | * rate for this vector then use that result */ | |
30efa5a3 | 1589 | q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? |
e8e9f696 | 1590 | q_vector->tx_itr - 1 : ret_itr); |
f494e8fa | 1591 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
e8e9f696 | 1592 | r_idx + 1); |
f494e8fa AV |
1593 | } |
1594 | ||
1595 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | |
1596 | for (i = 0; i < q_vector->rxr_count; i++) { | |
4a0b9ca0 | 1597 | rx_ring = adapter->rx_ring[r_idx]; |
f494e8fa | 1598 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
e8e9f696 JP |
1599 | q_vector->rx_itr, |
1600 | rx_ring->total_packets, | |
1601 | rx_ring->total_bytes); | |
f494e8fa AV |
1602 | /* if the result for this queue would decrease interrupt |
1603 | * rate for this vector then use that result */ | |
30efa5a3 | 1604 | q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? |
e8e9f696 | 1605 | q_vector->rx_itr - 1 : ret_itr); |
f494e8fa | 1606 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
e8e9f696 | 1607 | r_idx + 1); |
f494e8fa AV |
1608 | } |
1609 | ||
30efa5a3 | 1610 | current_itr = max(q_vector->rx_itr, q_vector->tx_itr); |
f494e8fa AV |
1611 | |
1612 | switch (current_itr) { | |
1613 | /* counts and packets in update_itr are dependent on these numbers */ | |
1614 | case lowest_latency: | |
1615 | new_itr = 100000; | |
1616 | break; | |
1617 | case low_latency: | |
1618 | new_itr = 20000; /* aka hwitr = ~200 */ | |
1619 | break; | |
1620 | case bulk_latency: | |
1621 | default: | |
1622 | new_itr = 8000; | |
1623 | break; | |
1624 | } | |
1625 | ||
1626 | if (new_itr != q_vector->eitr) { | |
fe49f04a AD |
1627 | /* do an exponential smoothing */ |
1628 | new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); | |
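| /* weight 90% previous value, 10% new target, to damp sudden EITR swings */ |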
509ee935 JB |
1629 | |
1630 | /* save the algorithm value here, not the smoothed one */ | |
1631 | q_vector->eitr = new_itr; | |
fe49f04a AD |
1632 | |
1633 | ixgbe_write_eitr(q_vector); | |
f494e8fa | 1634 | } |
f494e8fa AV |
1635 | } |
1636 | ||
119fc60a MC |
1637 | /** |
1638 | * ixgbe_check_overtemp_task - worker thread to check for over temperature |
1639 | * @work: pointer to work_struct containing our data | |
1640 | **/ | |
1641 | static void ixgbe_check_overtemp_task(struct work_struct *work) | |
1642 | { | |
1643 | struct ixgbe_adapter *adapter = container_of(work, | |
e8e9f696 JP |
1644 | struct ixgbe_adapter, |
1645 | check_overtemp_task); | |
119fc60a MC |
1646 | struct ixgbe_hw *hw = &adapter->hw; |
1647 | u32 eicr = adapter->interrupt_event; | |
1648 | ||
7ca647bd JP |
1649 | if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) |
1650 | return; | |
1651 | ||
1652 | switch (hw->device_id) { | |
1653 | case IXGBE_DEV_ID_82599_T3_LOM: { | |
1654 | u32 autoneg; | |
1655 | bool link_up = false; | |
1656 | ||
1657 | if (hw->mac.ops.check_link) | |
1658 | hw->mac.ops.check_link(hw, &autoneg, &link_up, false); | |
1659 | ||
1660 | if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || | |
1661 | (eicr & IXGBE_EICR_LSC)) | |
1662 | /* Check if this is due to overtemp */ | |
1663 | if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) | |
1664 | break; | |
1665 | return; | |
1666 | } | |
1667 | default: | |
1668 | if (!(eicr & IXGBE_EICR_GPI_SDP0)) | |
119fc60a | 1669 | return; |
7ca647bd | 1670 | break; |
119fc60a | 1671 | } |
7ca647bd JP |
1672 | e_crit(drv, |
1673 | "Network adapter has been stopped because it has over heated. " | |
1674 | "Restart the computer. If the problem persists, " | |
1675 | "power off the system and replace the adapter\n"); | |
1676 | /* write to clear the interrupt */ | |
1677 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); | |
119fc60a MC |
1678 | } |
1679 | ||
0befdb3e JB |
1680 | static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) |
1681 | { | |
1682 | struct ixgbe_hw *hw = &adapter->hw; | |
1683 | ||
1684 | if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && | |
1685 | (eicr & IXGBE_EICR_GPI_SDP1)) { | |
396e799c | 1686 | e_crit(probe, "Fan has stopped, replace the adapter\n"); |
0befdb3e JB |
1687 | /* write to clear the interrupt */ |
1688 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); | |
1689 | } | |
1690 | } | |
cf8280ee | 1691 | |
e8e26350 PW |
1692 | static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) |
1693 | { | |
1694 | struct ixgbe_hw *hw = &adapter->hw; | |
1695 | ||
1696 | if (eicr & IXGBE_EICR_GPI_SDP1) { | |
1697 | /* Clear the interrupt */ | |
1698 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); | |
1699 | schedule_work(&adapter->multispeed_fiber_task); | |
1700 | } else if (eicr & IXGBE_EICR_GPI_SDP2) { | |
1701 | /* Clear the interrupt */ | |
1702 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); | |
1703 | schedule_work(&adapter->sfp_config_module_task); | |
1704 | } else { | |
1705 | /* Interrupt isn't for us... */ | |
1706 | return; | |
1707 | } | |
1708 | } | |
1709 | ||
cf8280ee JB |
1710 | static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) |
1711 | { | |
1712 | struct ixgbe_hw *hw = &adapter->hw; | |
1713 | ||
1714 | adapter->lsc_int++; | |
1715 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | |
1716 | adapter->link_check_timeout = jiffies; | |
1717 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | |
1718 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); | |
8a0717f3 | 1719 | IXGBE_WRITE_FLUSH(hw); |
cf8280ee JB |
1720 | schedule_work(&adapter->watchdog_task); |
1721 | } | |
1722 | } | |
1723 | ||
9a799d71 AK |
1724 | static irqreturn_t ixgbe_msix_lsc(int irq, void *data) |
1725 | { | |
1726 | struct net_device *netdev = data; | |
1727 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
1728 | struct ixgbe_hw *hw = &adapter->hw; | |
54037505 DS |
1729 | u32 eicr; |
1730 | ||
1731 | /* | |
1732 | * Workaround for Silicon errata. Use clear-by-write instead | |
1733 | * of clear-by-read. Reading with EICS will return the | |
1734 | * interrupt causes without clearing, which will later be done |
1735 | * with the write to EICR. | |
1736 | */ | |
1737 | eicr = IXGBE_READ_REG(hw, IXGBE_EICS); | |
1738 | IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); | |
9a799d71 | 1739 | |
cf8280ee JB |
1740 | if (eicr & IXGBE_EICR_LSC) |
1741 | ixgbe_check_lsc(adapter); | |
d4f80882 | 1742 | |
1cdd1ec8 GR |
1743 | if (eicr & IXGBE_EICR_MAILBOX) |
1744 | ixgbe_msg_task(adapter); | |
1745 | ||
e8e26350 PW |
1746 | if (hw->mac.type == ixgbe_mac_82598EB) |
1747 | ixgbe_check_fan_failure(adapter, eicr); | |
0befdb3e | 1748 | |
c4cf55e5 | 1749 | if (hw->mac.type == ixgbe_mac_82599EB) { |
e8e26350 | 1750 | ixgbe_check_sfp_event(adapter, eicr); |
119fc60a MC |
1751 | adapter->interrupt_event = eicr; |
1752 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | |
1753 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) | |
1754 | schedule_work(&adapter->check_overtemp_task); | |
c4cf55e5 PWJ |
1755 | |
1756 | /* Handle Flow Director Full threshold interrupt */ | |
1757 | if (eicr & IXGBE_EICR_FLOW_DIR) { | |
1758 | int i; | |
1759 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR); | |
1760 | /* Disable transmits before FDIR Re-initialization */ | |
1761 | netif_tx_stop_all_queues(netdev); | |
1762 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
1763 | struct ixgbe_ring *tx_ring = | |
e8e9f696 | 1764 | adapter->tx_ring[i]; |
c4cf55e5 | 1765 | if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, |
e8e9f696 | 1766 | &tx_ring->reinit_state)) |
c4cf55e5 PWJ |
1767 | schedule_work(&adapter->fdir_reinit_task); |
1768 | } | |
1769 | } | |
1770 | } | |
d4f80882 AV |
1771 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1772 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); | |
9a799d71 AK |
1773 | |
1774 | return IRQ_HANDLED; | |
1775 | } | |
1776 | ||
fe49f04a AD |
1777 | static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, |
1778 | u64 qmask) | |
1779 | { | |
1780 | u32 mask; | |
1781 | ||
1782 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | |
1783 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); | |
1784 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | |
1785 | } else { | |
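| /* non-82598 parts (82599 here): the 64-bit queue mask spans EIMS_EX[0] and EIMS_EX[1] */ |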
1786 | mask = (qmask & 0xFFFFFFFF); | |
1787 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); | |
1788 | mask = (qmask >> 32); | |
1789 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); | |
1790 | } | |
1791 | /* skip the flush */ | |
1792 | } | |
1793 | ||
1794 | static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, | |
e8e9f696 | 1795 | u64 qmask) |
fe49f04a AD |
1796 | { |
1797 | u32 mask; | |
1798 | ||
1799 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | |
1800 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); | |
1801 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); | |
1802 | } else { | |
1803 | mask = (qmask & 0xFFFFFFFF); | |
1804 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); | |
1805 | mask = (qmask >> 32); | |
1806 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); | |
1807 | } | |
1808 | /* skip the flush */ | |
1809 | } | |
1810 | ||
9a799d71 AK |
1811 | static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) |
1812 | { | |
021230d4 AV |
1813 | struct ixgbe_q_vector *q_vector = data; |
1814 | struct ixgbe_adapter *adapter = q_vector->adapter; | |
3a581073 | 1815 | struct ixgbe_ring *tx_ring; |
021230d4 AV |
1816 | int i, r_idx; |
1817 | ||
1818 | if (!q_vector->txr_count) | |
1819 | return IRQ_HANDLED; | |
1820 | ||
1821 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | |
1822 | for (i = 0; i < q_vector->txr_count; i++) { | |
4a0b9ca0 | 1823 | tx_ring = adapter->tx_ring[r_idx]; |
3a581073 JB |
1824 | tx_ring->total_bytes = 0; |
1825 | tx_ring->total_packets = 0; | |
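| /* zero the counters so the ITR update after this poll measures only |
| * traffic from the current interrupt interval */ |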
021230d4 | 1826 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
e8e9f696 | 1827 | r_idx + 1); |
021230d4 | 1828 | } |
9a799d71 | 1829 | |
9b471446 | 1830 | /* EIAM disabled interrupts (on this vector) for us */ |
91281fd3 AD |
1831 | napi_schedule(&q_vector->napi); |
1832 | ||
9a799d71 AK |
1833 | return IRQ_HANDLED; |
1834 | } | |
1835 | ||
021230d4 AV |
1836 | /** |
1837 | * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) | |
1838 | * @irq: unused | |
1839 | * @data: pointer to our q_vector struct for this interrupt vector | |
1840 | **/ | |
9a799d71 AK |
1841 | static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) |
1842 | { | |
021230d4 AV |
1843 | struct ixgbe_q_vector *q_vector = data; |
1844 | struct ixgbe_adapter *adapter = q_vector->adapter; | |
3a581073 | 1845 | struct ixgbe_ring *rx_ring; |
021230d4 | 1846 | int r_idx; |
30efa5a3 | 1847 | int i; |
021230d4 AV |
1848 | |
1849 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | |
30efa5a3 | 1850 | for (i = 0; i < q_vector->rxr_count; i++) { |
4a0b9ca0 | 1851 | rx_ring = adapter->rx_ring[r_idx]; |
30efa5a3 JB |
1852 | rx_ring->total_bytes = 0; |
1853 | rx_ring->total_packets = 0; | |
1854 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | |
e8e9f696 | 1855 | r_idx + 1); |
30efa5a3 JB |
1856 | } |
1857 | ||
021230d4 AV |
1858 | if (!q_vector->rxr_count) |
1859 | return IRQ_HANDLED; | |
1860 | ||
021230d4 | 1861 | /* disable interrupts on this vector only */ |
9b471446 | 1862 | /* EIAM disabled interrupts (on this vector) for us */ |
288379f0 | 1863 | napi_schedule(&q_vector->napi); |
021230d4 AV |
1864 | |
1865 | return IRQ_HANDLED; | |
1866 | } | |
1867 | ||
1868 | static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) | |
1869 | { | |
91281fd3 AD |
1870 | struct ixgbe_q_vector *q_vector = data; |
1871 | struct ixgbe_adapter *adapter = q_vector->adapter; | |
1872 | struct ixgbe_ring *ring; | |
1873 | int r_idx; | |
1874 | int i; | |
1875 | ||
1876 | if (!q_vector->txr_count && !q_vector->rxr_count) | |
1877 | return IRQ_HANDLED; | |
1878 | ||
1879 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | |
1880 | for (i = 0; i < q_vector->txr_count; i++) { | |
4a0b9ca0 | 1881 | ring = adapter->tx_ring[r_idx]; |
91281fd3 AD |
1882 | ring->total_bytes = 0; |
1883 | ring->total_packets = 0; | |
1884 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | |
e8e9f696 | 1885 | r_idx + 1); |
91281fd3 AD |
1886 | } |
1887 | ||
1888 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | |
1889 | for (i = 0; i < q_vector->rxr_count; i++) { | |
4a0b9ca0 | 1890 | ring = adapter->rx_ring[r_idx]; |
91281fd3 AD |
1891 | ring->total_bytes = 0; |
1892 | ring->total_packets = 0; | |
1893 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | |
e8e9f696 | 1894 | r_idx + 1); |
91281fd3 AD |
1895 | } |
1896 | ||
9b471446 | 1897 | /* EIAM disabled interrupts (on this vector) for us */ |
91281fd3 | 1898 | napi_schedule(&q_vector->napi); |
9a799d71 | 1899 | |
9a799d71 AK |
1900 | return IRQ_HANDLED; |
1901 | } | |
1902 | ||
021230d4 AV |
1903 | /** |
1904 | * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine | |
1905 | * @napi: napi struct with our device's info in it |
1906 | * @budget: amount of work driver is allowed to do this pass, in packets | |
1907 | * | |
f0848276 JB |
1908 | * This function is optimized for cleaning one queue only on a single |
1909 | * q_vector!!! | |
021230d4 | 1910 | **/ |
9a799d71 AK |
1911 | static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) |
1912 | { | |
021230d4 | 1913 | struct ixgbe_q_vector *q_vector = |
e8e9f696 | 1914 | container_of(napi, struct ixgbe_q_vector, napi); |
021230d4 | 1915 | struct ixgbe_adapter *adapter = q_vector->adapter; |
f0848276 | 1916 | struct ixgbe_ring *rx_ring = NULL; |
9a799d71 | 1917 | int work_done = 0; |
021230d4 | 1918 | long r_idx; |
9a799d71 | 1919 | |
021230d4 | 1920 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
4a0b9ca0 | 1921 | rx_ring = adapter->rx_ring[r_idx]; |
5dd2d332 | 1922 | #ifdef CONFIG_IXGBE_DCA |
bd0362dd | 1923 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
3a581073 | 1924 | ixgbe_update_rx_dca(adapter, rx_ring); |
bd0362dd | 1925 | #endif |
9a799d71 | 1926 | |
78b6f4ce | 1927 | ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); |
9a799d71 | 1928 | |
021230d4 AV |
1929 | /* If all Rx work done, exit the polling mode */ |
1930 | if (work_done < budget) { | |
288379f0 | 1931 | napi_complete(napi); |
f7554a2b | 1932 | if (adapter->rx_itr_setting & 1) |
f494e8fa | 1933 | ixgbe_set_itr_msix(q_vector); |
9a799d71 | 1934 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
fe49f04a | 1935 | ixgbe_irq_enable_queues(adapter, |
e8e9f696 | 1936 | ((u64)1 << q_vector->v_idx)); |
9a799d71 AK |
1937 | } |
1938 | ||
1939 | return work_done; | |
1940 | } | |
1941 | ||
f0848276 | 1942 | /** |
91281fd3 | 1943 | * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine |
f0848276 JB |
1944 | * @napi: napi struct with our device's info in it |
1945 | * @budget: amount of work driver is allowed to do this pass, in packets | |
1946 | * | |
1947 | * This function will clean more than one rx/tx queue associated with a |
1948 | * q_vector. | |
1949 | **/ | |
91281fd3 | 1950 | static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) |
f0848276 JB |
1951 | { |
1952 | struct ixgbe_q_vector *q_vector = | |
e8e9f696 | 1953 | container_of(napi, struct ixgbe_q_vector, napi); |
f0848276 | 1954 | struct ixgbe_adapter *adapter = q_vector->adapter; |
91281fd3 | 1955 | struct ixgbe_ring *ring = NULL; |
f0848276 JB |
1956 | int work_done = 0, i; |
1957 | long r_idx; | |
91281fd3 AD |
1958 | bool tx_clean_complete = true; |
1959 | ||
1960 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | |
1961 | for (i = 0; i < q_vector->txr_count; i++) { | |
4a0b9ca0 | 1962 | ring = adapter->tx_ring[r_idx]; |
91281fd3 AD |
1963 | #ifdef CONFIG_IXGBE_DCA |
1964 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | |
1965 | ixgbe_update_tx_dca(adapter, ring); | |
1966 | #endif | |
1967 | tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); | |
1968 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | |
e8e9f696 | 1969 | r_idx + 1); |
91281fd3 | 1970 | } |
f0848276 JB |
1971 | |
1972 | /* attempt to distribute budget to each queue fairly, but don't allow | |
1973 | * the budget to go below 1 because we'll exit polling */ | |
1974 | budget /= (q_vector->rxr_count ?: 1); | |
1975 | budget = max(budget, 1); | |
1976 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | |
1977 | for (i = 0; i < q_vector->rxr_count; i++) { | |
4a0b9ca0 | 1978 | ring = adapter->rx_ring[r_idx]; |
5dd2d332 | 1979 | #ifdef CONFIG_IXGBE_DCA |
f0848276 | 1980 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
91281fd3 | 1981 | ixgbe_update_rx_dca(adapter, ring); |
f0848276 | 1982 | #endif |
91281fd3 | 1983 | ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); |
f0848276 | 1984 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
e8e9f696 | 1985 | r_idx + 1); |
f0848276 JB |
1986 | } |
1987 | ||
1988 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | |
4a0b9ca0 | 1989 | ring = adapter->rx_ring[r_idx]; |
f0848276 | 1990 | /* If all Rx work done, exit the polling mode */ |
7f821875 | 1991 | if (work_done < budget) { |
288379f0 | 1992 | napi_complete(napi); |
f7554a2b | 1993 | if (adapter->rx_itr_setting & 1) |
f0848276 JB |
1994 | ixgbe_set_itr_msix(q_vector); |
1995 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | |
fe49f04a | 1996 | ixgbe_irq_enable_queues(adapter, |
e8e9f696 | 1997 | ((u64)1 << q_vector->v_idx)); |
f0848276 JB |
1998 | return 0; |
1999 | } | |
2000 | ||
2001 | return work_done; | |
2002 | } | |
91281fd3 AD |
2003 | |
2004 | /** | |
2005 | * ixgbe_clean_txonly - msix (aka one shot) tx clean routine | |
2006 | * @napi: napi struct with our device's info in it |
2007 | * @budget: amount of work driver is allowed to do this pass, in packets | |
2008 | * | |
2009 | * This function is optimized for cleaning one queue only on a single | |
2010 | * q_vector!!! | |
2011 | **/ | |
2012 | static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) | |
2013 | { | |
2014 | struct ixgbe_q_vector *q_vector = | |
e8e9f696 | 2015 | container_of(napi, struct ixgbe_q_vector, napi); |
91281fd3 AD |
2016 | struct ixgbe_adapter *adapter = q_vector->adapter; |
2017 | struct ixgbe_ring *tx_ring = NULL; | |
2018 | int work_done = 0; | |
2019 | long r_idx; | |
2020 | ||
2021 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | |
4a0b9ca0 | 2022 | tx_ring = adapter->tx_ring[r_idx]; |
91281fd3 AD |
2023 | #ifdef CONFIG_IXGBE_DCA |
2024 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | |
2025 | ixgbe_update_tx_dca(adapter, tx_ring); | |
2026 | #endif | |
2027 | ||
2028 | if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) | |
2029 | work_done = budget; | |
2030 | ||
f7554a2b | 2031 | /* If all Tx work done, exit the polling mode */ |
91281fd3 AD |
2032 | if (work_done < budget) { |
2033 | napi_complete(napi); | |
f7554a2b | 2034 | if (adapter->tx_itr_setting & 1) |
91281fd3 AD |
2035 | ixgbe_set_itr_msix(q_vector); |
2036 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | |
e8e9f696 JP |
2037 | ixgbe_irq_enable_queues(adapter, |
2038 | ((u64)1 << q_vector->v_idx)); | |
91281fd3 AD |
2039 | } |
2040 | ||
2041 | return work_done; | |
2042 | } | |
2043 | ||
021230d4 | 2044 | static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, |
e8e9f696 | 2045 | int r_idx) |
021230d4 | 2046 | { |
7a921c93 AD |
2047 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
2048 | ||
2049 | set_bit(r_idx, q_vector->rxr_idx); | |
2050 | q_vector->rxr_count++; | |
021230d4 AV |
2051 | } |
2052 | ||
2053 | static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, | |
e8e9f696 | 2054 | int t_idx) |
021230d4 | 2055 | { |
7a921c93 AD |
2056 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
2057 | ||
2058 | set_bit(t_idx, q_vector->txr_idx); | |
2059 | q_vector->txr_count++; | |
021230d4 AV |
2060 | } |
2061 | ||
9a799d71 | 2062 | /** |
021230d4 AV |
2063 | * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors |
2064 | * @adapter: board private structure to initialize | |
2065 | * @vectors: allotted vector count for descriptor rings | |
9a799d71 | 2066 | * |
021230d4 AV |
2067 | * This function maps descriptor rings to the queue-specific vectors |
2068 | * we were allotted through the MSI-X enabling code. Ideally, we'd have | |
2069 | * one vector per ring/queue, but on a constrained vector budget, we | |
2070 | * group the rings as "efficiently" as possible. You would add new | |
2071 | * mapping configurations in here. | |
9a799d71 | 2072 | **/ |
021230d4 | 2073 | static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, |
e8e9f696 | 2074 | int vectors) |
021230d4 AV |
2075 | { |
2076 | int v_start = 0; | |
2077 | int rxr_idx = 0, txr_idx = 0; | |
2078 | int rxr_remaining = adapter->num_rx_queues; | |
2079 | int txr_remaining = adapter->num_tx_queues; | |
2080 | int i, j; | |
2081 | int rqpv, tqpv; | |
2082 | int err = 0; | |
2083 | ||
2084 | /* No mapping required if MSI-X is disabled. */ | |
2085 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | |
2086 | goto out; | |
9a799d71 | 2087 | |
021230d4 AV |
2088 | /* |
2089 | * The ideal configuration... | |
2090 | * We have enough vectors to map one per queue. | |
2091 | */ | |
2092 | if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { | |
2093 | for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) | |
2094 | map_vector_to_rxq(adapter, v_start, rxr_idx); | |
9a799d71 | 2095 | |
021230d4 AV |
2096 | for (; txr_idx < txr_remaining; v_start++, txr_idx++) |
2097 | map_vector_to_txq(adapter, v_start, txr_idx); | |
9a799d71 | 2098 | |
9a799d71 | 2099 | goto out; |
021230d4 | 2100 | } |
9a799d71 | 2101 | |
021230d4 AV |
2102 | /* |
2103 | * If we don't have enough vectors for a 1-to-1 | |
2104 | * mapping, we'll have to group them so there are | |
2105 | * multiple queues per vector. | |
2106 | */ | |
2107 | /* Re-adjusting *qpv takes care of the remainder. */ | |
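| /* e.g. 10 Rx queues over 4 vectors: DIV_ROUND_UP assigns 3, 3, 2, 2 */ |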
2108 | for (i = v_start; i < vectors; i++) { | |
2109 | rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); | |
2110 | for (j = 0; j < rqpv; j++) { | |
2111 | map_vector_to_rxq(adapter, i, rxr_idx); | |
2112 | rxr_idx++; | |
2113 | rxr_remaining--; | |
2114 | } | |
2115 | } | |
2116 | for (i = v_start; i < vectors; i++) { | |
2117 | tqpv = DIV_ROUND_UP(txr_remaining, vectors - i); | |
2118 | for (j = 0; j < tqpv; j++) { | |
2119 | map_vector_to_txq(adapter, i, txr_idx); | |
2120 | txr_idx++; | |
2121 | txr_remaining--; | |
9a799d71 | 2122 | } |
9a799d71 AK |
2123 | } |
2124 | ||
021230d4 AV |
2125 | out: |
2126 | return err; | |
2127 | } | |
2128 | ||
2129 | /** | |
2130 | * ixgbe_request_msix_irqs - Initialize MSI-X interrupts | |
2131 | * @adapter: board private structure | |
2132 | * | |
2133 | * ixgbe_request_msix_irqs allocates MSI-X vectors and requests | |
2134 | * interrupts from the kernel. | |
2135 | **/ | |
2136 | static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |
2137 | { | |
2138 | struct net_device *netdev = adapter->netdev; | |
2139 | irqreturn_t (*handler)(int, void *); | |
2140 | int i, vector, q_vectors, err; | |
e8e9f696 | 2141 | int ri = 0, ti = 0; |
021230d4 AV |
2142 | |
2143 | /* Decrement for Other and TCP Timer vectors */ | |
2144 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | |
2145 | ||
2146 | /* Map the Tx/Rx rings to the vectors we were allotted. */ | |
2147 | err = ixgbe_map_rings_to_vectors(adapter, q_vectors); | |
2148 | if (err) | |
2149 | goto out; | |
2150 | ||
2151 | #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ | |
e8e9f696 JP |
2152 | (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ |
2153 | &ixgbe_msix_clean_many) | |
021230d4 | 2154 | for (vector = 0; vector < q_vectors; vector++) { |
7a921c93 | 2155 | handler = SET_HANDLER(adapter->q_vector[vector]); |
cb13fc20 | 2156 | |
e8e9f696 | 2157 | if (handler == &ixgbe_msix_clean_rx) { |
cb13fc20 RO |
2158 | sprintf(adapter->name[vector], "%s-%s-%d", |
2159 | netdev->name, "rx", ri++); | |
e8e9f696 | 2160 | } else if (handler == &ixgbe_msix_clean_tx) { |
cb13fc20 RO |
2161 | sprintf(adapter->name[vector], "%s-%s-%d", |
2162 | netdev->name, "tx", ti++); | |
e8e9f696 | 2163 | } else |
cb13fc20 RO |
2164 | sprintf(adapter->name[vector], "%s-%s-%d", |
2165 | netdev->name, "TxRx", vector); | |
2166 | ||
021230d4 | 2167 | err = request_irq(adapter->msix_entries[vector].vector, |
e8e9f696 JP |
2168 | handler, 0, adapter->name[vector], |
2169 | adapter->q_vector[vector]); | |
9a799d71 | 2170 | if (err) { |
396e799c | 2171 | e_err(probe, "request_irq failed for MSIX interrupt, " |
849c4542 | 2172 | "Error: %d\n", err); |
021230d4 | 2173 | goto free_queue_irqs; |
9a799d71 | 2174 | } |
9a799d71 AK |
2175 | } |
2176 | ||
021230d4 AV |
2177 | sprintf(adapter->name[vector], "%s:lsc", netdev->name); |
2178 | err = request_irq(adapter->msix_entries[vector].vector, | |
e8e9f696 | 2179 | ixgbe_msix_lsc, 0, adapter->name[vector], netdev); |
9a799d71 | 2180 | if (err) { |
396e799c | 2181 | e_err(probe, "request_irq for msix_lsc failed: %d\n", err); |
021230d4 | 2182 | goto free_queue_irqs; |
9a799d71 AK |
2183 | } |
2184 | ||
9a799d71 AK |
2185 | return 0; |
2186 | ||
021230d4 AV |
2187 | free_queue_irqs: |
2188 | for (i = vector - 1; i >= 0; i--) | |
2189 | free_irq(adapter->msix_entries[--vector].vector, | |
e8e9f696 | 2190 | adapter->q_vector[i]); |
021230d4 AV |
2191 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
2192 | pci_disable_msix(adapter->pdev); | |
9a799d71 AK |
2193 | kfree(adapter->msix_entries); |
2194 | adapter->msix_entries = NULL; | |
021230d4 | 2195 | out: |
9a799d71 AK |
2196 | return err; |
2197 | } | |
2198 | ||
f494e8fa AV |
2199 | static void ixgbe_set_itr(struct ixgbe_adapter *adapter) |
2200 | { | |
7a921c93 | 2201 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
f494e8fa AV |
2202 | u8 current_itr; |
2203 | u32 new_itr = q_vector->eitr; | |
4a0b9ca0 PW |
2204 | struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; |
2205 | struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; | |
f494e8fa | 2206 | |
30efa5a3 | 2207 | q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, |
e8e9f696 JP |
2208 | q_vector->tx_itr, |
2209 | tx_ring->total_packets, | |
2210 | tx_ring->total_bytes); | |
30efa5a3 | 2211 | q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, |
e8e9f696 JP |
2212 | q_vector->rx_itr, |
2213 | rx_ring->total_packets, | |
2214 | rx_ring->total_bytes); | |
f494e8fa | 2215 | |
30efa5a3 | 2216 | current_itr = max(q_vector->rx_itr, q_vector->tx_itr); |
f494e8fa AV |
2217 | |
2218 | switch (current_itr) { | |
2219 | /* counts and packets in update_itr are dependent on these numbers */ | |
2220 | case lowest_latency: | |
2221 | new_itr = 100000; | |
2222 | break; | |
2223 | case low_latency: | |
2224 | new_itr = 20000; /* aka hwitr = ~200 */ | |
2225 | break; | |
2226 | case bulk_latency: | |
2227 | new_itr = 8000; | |
2228 | break; | |
2229 | default: | |
2230 | break; | |
2231 | } | |
2232 | ||
2233 | if (new_itr != q_vector->eitr) { | |
fe49f04a AD |
2234 | /* do an exponential smoothing */ |
2235 | new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); | |
509ee935 JB |
2236 | |
2237 | /* save the algorithm value here, not the smoothed one */ | |
2238 | q_vector->eitr = new_itr; | |
fe49f04a AD |
2239 | |
2240 | ixgbe_write_eitr(q_vector); | |
f494e8fa | 2241 | } |
f494e8fa AV |
2242 | } |
2243 | ||
79aefa45 AD |
2244 | /** |
2245 | * ixgbe_irq_enable - Enable default interrupt generation settings | |
2246 | * @adapter: board private structure | |
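| * @queues: when set, also enable the per-queue interrupt bits |
| * @flush: when set, flush the register writes when done |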
2247 | **/ | |
6af3b9eb ET |
2248 | static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, |
2249 | bool flush) | |
79aefa45 AD |
2250 | { |
2251 | u32 mask; | |
835462fc NS |
2252 | |
2253 | mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); | |
119fc60a MC |
2254 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) |
2255 | mask |= IXGBE_EIMS_GPI_SDP0; | |
6ab33d51 DM |
2256 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) |
2257 | mask |= IXGBE_EIMS_GPI_SDP1; | |
e8e26350 | 2258 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
2a41ff81 | 2259 | mask |= IXGBE_EIMS_ECC; |
e8e26350 PW |
2260 | mask |= IXGBE_EIMS_GPI_SDP1; |
2261 | mask |= IXGBE_EIMS_GPI_SDP2; | |
1cdd1ec8 GR |
2262 | if (adapter->num_vfs) |
2263 | mask |= IXGBE_EIMS_MAILBOX; | |
e8e26350 | 2264 | } |
c4cf55e5 PWJ |
2265 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
2266 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | |
2267 | mask |= IXGBE_EIMS_FLOW_DIR; | |
e8e26350 | 2268 | |
79aefa45 | 2269 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
6af3b9eb ET |
2270 | if (queues) |
2271 | ixgbe_irq_enable_queues(adapter, ~0); | |
2272 | if (flush) | |
2273 | IXGBE_WRITE_FLUSH(&adapter->hw); | |
1cdd1ec8 GR |
2274 | |
2275 | if (adapter->num_vfs > 32) { | |
2276 | u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; | |
2277 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); | |
2278 | } | |
79aefa45 | 2279 | } |
021230d4 | 2280 | |
9a799d71 | 2281 | /** |
021230d4 | 2282 | * ixgbe_intr - legacy mode Interrupt Handler |
9a799d71 AK |
2283 | * @irq: interrupt number |
2284 | * @data: pointer to a network interface device structure | |
9a799d71 AK |
2285 | **/ |
2286 | static irqreturn_t ixgbe_intr(int irq, void *data) | |
2287 | { | |
2288 | struct net_device *netdev = data; | |
2289 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
2290 | struct ixgbe_hw *hw = &adapter->hw; | |
7a921c93 | 2291 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
9a799d71 AK |
2292 | u32 eicr; |
2293 | ||
54037505 | 2294 | /* |
6af3b9eb | 2295 | * Workaround for silicon errata on 82598. Mask the interrupts |
54037505 DS |
2296 | * before the read of EICR. |
2297 | */ | |
2298 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); | |
2299 | ||
021230d4 AV |
2300 | /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read |
2301 | * therefore no explicit interrupt disable is necessary */ |
2302 | eicr = IXGBE_READ_REG(hw, IXGBE_EICR); | |
f47cf66e | 2303 | if (!eicr) { |
6af3b9eb ET |
2304 | /* |
2305 | * shared interrupt alert! | |
f47cf66e | 2306 | * make sure interrupts are enabled because the read will |
6af3b9eb ET |
2307 | * have disabled interrupts due to EIAM. |
2308 | * Finish the workaround of silicon errata on 82598: unmask |
2309 | * the interrupt that we masked before the EICR read. |
2310 | */ | |
2311 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | |
2312 | ixgbe_irq_enable(adapter, true, true); | |
9a799d71 | 2313 | return IRQ_NONE; /* Not our interrupt */ |
f47cf66e | 2314 | } |
9a799d71 | 2315 | |
cf8280ee JB |
2316 | if (eicr & IXGBE_EICR_LSC) |
2317 | ixgbe_check_lsc(adapter); | |
021230d4 | 2318 | |
e8e26350 PW |
2319 | if (hw->mac.type == ixgbe_mac_82599EB) |
2320 | ixgbe_check_sfp_event(adapter, eicr); | |
2321 | ||
0befdb3e | 2322 | ixgbe_check_fan_failure(adapter, eicr); |
119fc60a MC |
2323 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && |
2324 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) | |
2325 | schedule_work(&adapter->check_overtemp_task); | |
0befdb3e | 2326 | |
7a921c93 | 2327 | if (napi_schedule_prep(&(q_vector->napi))) { |
4a0b9ca0 PW |
2328 | adapter->tx_ring[0]->total_packets = 0; |
2329 | adapter->tx_ring[0]->total_bytes = 0; | |
2330 | adapter->rx_ring[0]->total_packets = 0; | |
2331 | adapter->rx_ring[0]->total_bytes = 0; | |
021230d4 | 2332 | /* would disable interrupts here but EIAM disabled it */ |
7a921c93 | 2333 | __napi_schedule(&(q_vector->napi)); |
9a799d71 AK |
2334 | } |
2335 | ||
6af3b9eb ET |
2336 | /* |
2337 | * re-enable link (maybe) and non-queue interrupts, no flush. |
2338 | * ixgbe_poll will re-enable the queue interrupts | |
2339 | */ | |
2340 | ||
2341 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | |
2342 | ixgbe_irq_enable(adapter, false, false); | |
2343 | ||
9a799d71 AK |
2344 | return IRQ_HANDLED; |
2345 | } | |
2346 | ||
021230d4 AV |
2347 | static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) |
2348 | { | |
2349 | int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | |
2350 | ||
2351 | for (i = 0; i < q_vectors; i++) { | |
7a921c93 | 2352 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; |
021230d4 AV |
2353 | bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); |
2354 | bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); | |
2355 | q_vector->rxr_count = 0; | |
2356 | q_vector->txr_count = 0; | |
2357 | } | |
2358 | } | |
2359 | ||
9a799d71 AK |
2360 | /** |
2361 | * ixgbe_request_irq - initialize interrupts | |
2362 | * @adapter: board private structure | |
2363 | * | |
2364 | * Attempts to configure interrupts using the best available | |
2365 | * capabilities of the hardware and kernel. | |
2366 | **/ | |
021230d4 | 2367 | static int ixgbe_request_irq(struct ixgbe_adapter *adapter) |
9a799d71 AK |
2368 | { |
2369 | struct net_device *netdev = adapter->netdev; | |
021230d4 | 2370 | int err; |
9a799d71 | 2371 | |
021230d4 AV |
2372 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
2373 | err = ixgbe_request_msix_irqs(adapter); | |
2374 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | |
a0607fd3 | 2375 | err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, |
e8e9f696 | 2376 | netdev->name, netdev); |
021230d4 | 2377 | } else { |
a0607fd3 | 2378 | err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, |
e8e9f696 | 2379 | netdev->name, netdev); |
9a799d71 AK |
2380 | } |
2381 | ||
9a799d71 | 2382 | if (err) |
396e799c | 2383 | e_err(probe, "request_irq failed, Error %d\n", err); |
9a799d71 | 2384 | |
9a799d71 AK |
2385 | return err; |
2386 | } | |
2387 | ||
2388 | static void ixgbe_free_irq(struct ixgbe_adapter *adapter) | |
2389 | { | |
2390 | struct net_device *netdev = adapter->netdev; | |
2391 | ||
2392 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | |
021230d4 | 2393 | int i, q_vectors; |
9a799d71 | 2394 | |
021230d4 AV |
2395 | q_vectors = adapter->num_msix_vectors; |
2396 | ||
2397 | i = q_vectors - 1; | |
9a799d71 | 2398 | free_irq(adapter->msix_entries[i].vector, netdev); |
9a799d71 | 2399 | |
021230d4 AV |
2400 | i--; |
2401 | for (; i >= 0; i--) { | |
2402 | free_irq(adapter->msix_entries[i].vector, | |
e8e9f696 | 2403 | adapter->q_vector[i]); |
021230d4 AV |
2404 | } |
2405 | ||
2406 | ixgbe_reset_q_vectors(adapter); | |
2407 | } else { | |
2408 | free_irq(adapter->pdev->irq, netdev); | |
9a799d71 AK |
2409 | } |
2410 | } | |
2411 | ||
22d5a71b JB |
2412 | /** |
2413 | * ixgbe_irq_disable - Mask off interrupt generation on the NIC | |
2414 | * @adapter: board private structure | |
2415 | **/ | |
2416 | static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) | |
2417 | { | |
835462fc NS |
2418 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
2419 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); | |
2420 | } else { | |
2421 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); | |
2422 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); | |
22d5a71b | 2423 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); |
1cdd1ec8 GR |
2424 | if (adapter->num_vfs > 32) |
2425 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); | |
22d5a71b JB |
2426 | } |
2427 | IXGBE_WRITE_FLUSH(&adapter->hw); | |
2428 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | |
2429 | int i; | |
2430 | for (i = 0; i < adapter->num_msix_vectors; i++) | |
2431 | synchronize_irq(adapter->msix_entries[i].vector); | |
2432 | } else { | |
2433 | synchronize_irq(adapter->pdev->irq); | |
2434 | } | |
2435 | } | |
2436 | ||
9a799d71 AK |
2437 | /** |
2438 | * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts | |
2439 | * | |
2440 | **/ | |
2441 | static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) | |
2442 | { | |
9a799d71 AK |
2443 | struct ixgbe_hw *hw = &adapter->hw; |
2444 | ||
021230d4 | 2445 | IXGBE_WRITE_REG(hw, IXGBE_EITR(0), |
e8e9f696 | 2446 | EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); |
9a799d71 | 2447 | |
e8e26350 PW |
2448 | ixgbe_set_ivar(adapter, 0, 0, 0); |
2449 | ixgbe_set_ivar(adapter, 1, 0, 0); | |
021230d4 AV |
2450 | |
2451 | map_vector_to_rxq(adapter, 0, 0); | |
2452 | map_vector_to_txq(adapter, 0, 0); | |
2453 | ||
396e799c | 2454 | e_info(hw, "Legacy interrupt IVAR setup done\n"); |
9a799d71 AK |
2455 | } |
2456 | ||
43e69bf0 AD |
2457 | /** |
2458 | * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset | |
2459 | * @adapter: board private structure | |
2460 | * @ring: structure containing ring specific data | |
2461 | * | |
2462 | * Configure the Tx descriptor ring after a reset. | |
2463 | **/ | |
84418e3b AD |
2464 | void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, |
2465 | struct ixgbe_ring *ring) | |
43e69bf0 AD |
2466 | { |
2467 | struct ixgbe_hw *hw = &adapter->hw; | |
2468 | u64 tdba = ring->dma; | |
2f1860b8 AD |
2469 | int wait_loop = 10; |
2470 | u32 txdctl; | |
43e69bf0 AD |
2471 | u16 reg_idx = ring->reg_idx; |
2472 | ||
2f1860b8 AD |
2473 | /* disable queue to avoid issues while updating state */ |
2474 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); | |
2475 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), | |
2476 | txdctl & ~IXGBE_TXDCTL_ENABLE); | |
2477 | IXGBE_WRITE_FLUSH(hw); | |
2478 | ||
43e69bf0 | 2479 | IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), |
e8e9f696 | 2480 | (tdba & DMA_BIT_MASK(32))); |
43e69bf0 AD |
2481 | IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); |
2482 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), | |
2483 | ring->count * sizeof(union ixgbe_adv_tx_desc)); | |
2484 | IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); | |
2485 | IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); | |
2486 | ring->head = IXGBE_TDH(reg_idx); | |
2487 | ring->tail = IXGBE_TDT(reg_idx); | |
2488 | ||
2f1860b8 AD |
2489 | /* configure fetching thresholds */ |
2490 | if (adapter->rx_itr_setting == 0) { | |
2491 | /* cannot set wthresh when itr==0 */ | |
2492 | txdctl &= ~0x007F0000; | |
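| /* 0x007F0000 is the 7-bit WTHRESH field at bits 22:16 */ |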
2493 | } else { | |
2494 | /* enable WTHRESH=8 descriptors, to encourage burst writeback */ | |
2495 | txdctl |= (8 << 16); | |
2496 | } | |
2497 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | |
2498 | /* PThresh workaround for Tx hang with DFP enabled. */ | |
2499 | txdctl |= 32; | |
2500 | } | |
2501 | ||
2502 | /* reinitialize flowdirector state */ | |
2503 | set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state); | |
2504 | ||
2505 | /* enable queue */ | |
2506 | txdctl |= IXGBE_TXDCTL_ENABLE; | |
2507 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); | |
2508 | ||
2509 | /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ | |
2510 | if (hw->mac.type == ixgbe_mac_82598EB && | |
2511 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) | |
2512 | return; | |
2513 | ||
2514 | /* poll to verify queue is enabled */ | |
2515 | do { | |
2516 | msleep(1); | |
2517 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); | |
2518 | } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); | |
2519 | if (!wait_loop) | |
2520 | e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); | |
43e69bf0 AD |
2521 | } |
2522 | ||
120ff942 AD |
2523 | static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) |
2524 | { | |
2525 | struct ixgbe_hw *hw = &adapter->hw; | |
2526 | u32 rttdcs; | |
2527 | u32 mask; | |
2528 | ||
2529 | if (hw->mac.type == ixgbe_mac_82598EB) | |
2530 | return; | |
2531 | ||
2532 | /* disable the arbiter while setting MTQC */ | |
2533 | rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); | |
2534 | rttdcs |= IXGBE_RTTDCS_ARBDIS; | |
2535 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | |
2536 | ||
2537 | /* set transmit pool layout */ | |
2538 | mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); | |
2539 | switch (adapter->flags & mask) { | |
2540 | ||
2541 | case (IXGBE_FLAG_SRIOV_ENABLED): | |
2542 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | |
2543 | (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); | |
2544 | break; | |
2545 | ||
2546 | case (IXGBE_FLAG_DCB_ENABLED): | |
2547 | /* We enable 8 traffic classes, DCB only */ | |
2548 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | |
2549 | (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); | |
2550 | break; | |
2551 | ||
2552 | default: | |
2553 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); | |
2554 | break; | |
2555 | } | |
2556 | ||
2557 | /* re-enable the arbiter */ | |
2558 | rttdcs &= ~IXGBE_RTTDCS_ARBDIS; | |
2559 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | |
2560 | } | |
2561 | ||
9a799d71 | 2562 | /** |
3a581073 | 2563 | * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset |
9a799d71 AK |
2564 | * @adapter: board private structure |
2565 | * | |
2566 | * Configure the Tx unit of the MAC after a reset. | |
2567 | **/ | |
2568 | static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | |
2569 | { | |
2f1860b8 AD |
2570 | struct ixgbe_hw *hw = &adapter->hw; |
2571 | u32 dmatxctl; | |
43e69bf0 | 2572 | u32 i; |
9a799d71 | 2573 | |
2f1860b8 AD |
2574 | ixgbe_setup_mtqc(adapter); |
2575 | ||
2576 | if (hw->mac.type != ixgbe_mac_82598EB) { | |
2577 | /* DMATXCTL.EN must be before Tx queues are enabled */ | |
2578 | dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); | |
2579 | dmatxctl |= IXGBE_DMATXCTL_TE; | |
2580 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); | |
2581 | } | |
2582 | ||
9a799d71 | 2583 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
43e69bf0 AD |
2584 | for (i = 0; i < adapter->num_tx_queues; i++) |
2585 | ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); | |
9a799d71 AK |
2586 | } |
2587 | ||
e8e26350 | 2588 | #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 |
cc41ac7c | 2589 | |
a6616b42 | 2590 | static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, |
e8e9f696 | 2591 | struct ixgbe_ring *rx_ring) |
cc41ac7c | 2592 | { |
cc41ac7c | 2593 | u32 srrctl; |
a6616b42 | 2594 | int index; |
0cefafad | 2595 | struct ixgbe_ring_feature *feature = adapter->ring_feature; |
3be1adfb | 2596 | |
a6616b42 YZ |
2597 | index = rx_ring->reg_idx; |
2598 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | |
2599 | unsigned long mask; | |
0cefafad | 2600 | mask = (unsigned long) feature[RING_F_RSS].mask; |
3be1adfb | 2601 | index = index & mask; |
cc41ac7c | 2602 | } |
cc41ac7c JB |
2603 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); |
2604 | ||
2605 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; | |
2606 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; | |
9e10e045 AD |
2607 | if (adapter->num_vfs) |
2608 | srrctl |= IXGBE_SRRCTL_DROP_EN; | |
cc41ac7c | 2609 | |
afafd5b0 AD |
2610 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & |
2611 | IXGBE_SRRCTL_BSIZEHDR_MASK; | |
2612 | ||
6e455b89 | 2613 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
afafd5b0 AD |
2614 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER |
2615 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | |
2616 | #else | |
2617 | srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | |
2618 | #endif | |
cc41ac7c | 2619 | srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; |
cc41ac7c | 2620 | } else { |
afafd5b0 AD |
2621 | srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> |
2622 | IXGBE_SRRCTL_BSIZEPKT_SHIFT; | |
cc41ac7c | 2623 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
cc41ac7c | 2624 | } |
e8e26350 | 2625 | |
cc41ac7c JB |
2626 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); |
2627 | } | |
9a799d71 | 2628 | |
05abb126 | 2629 | static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) |
0cefafad | 2630 | { |
05abb126 AD |
2631 | struct ixgbe_hw *hw = &adapter->hw; |
2632 | static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, | |
e8e9f696 JP |
2633 | 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, |
2634 | 0x6A3E67EA, 0x14364D17, 0x3BED200D}; | |
05abb126 AD |
2635 | u32 mrqc = 0, reta = 0; |
2636 | u32 rxcsum; | |
2637 | int i, j; | |
0cefafad JB |
2638 | int mask; |
2639 | ||
05abb126 AD |
2640 | /* Fill out hash function seeds */ |
2641 | for (i = 0; i < 10; i++) | |
2642 | IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); | |
2643 | ||
2644 | /* Fill out redirection table */ | |
2645 | for (i = 0, j = 0; i < 128; i++, j++) { | |
2646 | if (j == adapter->ring_feature[RING_F_RSS].indices) | |
2647 | j = 0; | |
2648 | /* reta = 4-byte sliding window of | |
2649 | * 0x00..(indices-1)(indices-1)00..etc. */ | |
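| /* each 32-bit RETA register holds four one-byte entries; j * 0x11 |
| * repeats the queue index in both nibbles of an entry */ |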
2650 | reta = (reta << 8) | (j * 0x11); | |
2651 | if ((i & 3) == 3) | |
2652 | IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); | |
2653 | } | |
0cefafad | 2654 | |
05abb126 AD |
2655 | /* Disable indicating checksum in descriptor, enable RSS hash */ |
2656 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); | |
2657 | rxcsum |= IXGBE_RXCSUM_PCSD; | |
2658 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); | |
2659 | ||
2660 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | |
2661 | mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED; | |
2662 | else | |
2663 | mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED | |
0cefafad | 2664 | #ifdef CONFIG_IXGBE_DCB |
05abb126 | 2665 | | IXGBE_FLAG_DCB_ENABLED |
0cefafad | 2666 | #endif |
05abb126 AD |
2667 | | IXGBE_FLAG_SRIOV_ENABLED |
2668 | ); | |
0cefafad JB |
2669 | |
2670 | switch (mask) { | |
2671 | case (IXGBE_FLAG_RSS_ENABLED): | |
2672 | mrqc = IXGBE_MRQC_RSSEN; | |
2673 | break; | |
1cdd1ec8 GR |
2674 | case (IXGBE_FLAG_SRIOV_ENABLED): |
2675 | mrqc = IXGBE_MRQC_VMDQEN; | |
2676 | break; | |
0cefafad JB |
2677 | #ifdef CONFIG_IXGBE_DCB |
2678 | case (IXGBE_FLAG_DCB_ENABLED): | |
2679 | mrqc = IXGBE_MRQC_RT8TCEN; | |
2680 | break; | |
2681 | #endif /* CONFIG_IXGBE_DCB */ | |
2682 | default: | |
2683 | break; | |
2684 | } | |
2685 | ||
05abb126 AD |
2686 | /* Perform hash on these packet types */ |
2687 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | |
2688 | | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | |
2689 | | IXGBE_MRQC_RSS_FIELD_IPV6 | |
2690 | | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; | |
2691 | ||
2692 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); | |
0cefafad JB |
2693 | } |
2694 | ||
bb5a9ad2 NS |
2695 | /** |
2696 | * ixgbe_configure_rscctl - enable RSC for the indicated ring | |
2697 | * @adapter: address of board private structure | |
2698 | * @ring: structure containing ring specific data |
bb5a9ad2 | 2699 | **/ |
7367096a AD |
2700 | static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, |
2701 | struct ixgbe_ring *ring) | |
bb5a9ad2 | 2702 | { |
bb5a9ad2 | 2703 | struct ixgbe_hw *hw = &adapter->hw; |
bb5a9ad2 | 2704 | u32 rscctrl; |
edd2ea55 | 2705 | int rx_buf_len; |
7367096a AD |
2706 | u16 reg_idx = ring->reg_idx; |
2707 | ||
2708 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) | |
2709 | return; | |
bb5a9ad2 | 2710 | |
7367096a AD |
2711 | rx_buf_len = ring->rx_buf_len; |
2712 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); | |
bb5a9ad2 NS |
2713 | rscctrl |= IXGBE_RSCCTL_RSCEN; |
2714 | /* | |
2715 | * we must limit the number of descriptors so that the | |
2716 | * total size of max desc * buf_len is not greater | |
2717 | * than 65535 | |
2718 | */ | |
7367096a | 2719 | if (ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
bb5a9ad2 NS |
2720 | #if (MAX_SKB_FRAGS > 16) |
2721 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | |
2722 | #elif (MAX_SKB_FRAGS > 8) | |
2723 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; | |
2724 | #elif (MAX_SKB_FRAGS > 4) | |
2725 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; | |
2726 | #else | |
2727 | rscctrl |= IXGBE_RSCCTL_MAXDESC_1; | |
2728 | #endif | |
2729 | } else { | |
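| /* e.g. 16 descriptors of a 4 KB buffer would be 65536 bytes, just over |
| * the limit, so 4 KB buffers are capped at 8 descriptors below */ |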
2730 | if (rx_buf_len < IXGBE_RXBUFFER_4096) | |
2731 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | |
2732 | else if (rx_buf_len < IXGBE_RXBUFFER_8192) | |
2733 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; | |
2734 | else | |
2735 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; | |
2736 | } | |
7367096a | 2737 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); |
bb5a9ad2 NS |
2738 | } |
2739 | ||
9e10e045 AD |
2740 | /** |
2741 | * ixgbe_set_uta - Set unicast filter table address | |
2742 | * @adapter: board private structure | |
2743 | * | |
2744 | * The unicast table address is a register array of 32-bit registers. | |
2746 | * The table is meant to be used in a way similar to how the MTA is used; |
2747 | * however, due to certain limitations in the hardware it is necessary to |
2747 | * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous | |
2748 | * enable bit to allow vlan tag stripping when promiscuous mode is enabled | |
2749 | **/ | |
2750 | static void ixgbe_set_uta(struct ixgbe_adapter *adapter) | |
2751 | { | |
2752 | struct ixgbe_hw *hw = &adapter->hw; | |
2753 | int i; | |
2754 | ||
2755 | /* The UTA table only exists on 82599 hardware and newer */ | |
2756 | if (hw->mac.type < ixgbe_mac_82599EB) | |
2757 | return; | |
2758 | ||
2759 | /* we only need to do this if VMDq is enabled */ | |
2760 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) | |
2761 | return; | |
2762 | ||
2763 | for (i = 0; i < 128; i++) | |
2764 | IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); | |
2765 | } | |
2766 | ||
2767 | #define IXGBE_MAX_RX_DESC_POLL 10 | |
2768 | static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | |
2769 | struct ixgbe_ring *ring) | |
2770 | { | |
2771 | struct ixgbe_hw *hw = &adapter->hw; | |
2772 | int reg_idx = ring->reg_idx; | |
2773 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; | |
2774 | u32 rxdctl; | |
2775 | ||
2776 | /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ | |
2777 | if (hw->mac.type == ixgbe_mac_82598EB && | |
2778 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) | |
2779 | return; | |
2780 | ||
2781 | do { | |
2782 | msleep(1); | |
2783 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | |
2784 | } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); | |
2785 | ||
2786 | if (!wait_loop) { | |
2787 | e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " | |
2788 | "the polling period\n", reg_idx); | |
2789 | } | |
2790 | } | |
2791 | ||
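/**
 * ixgbe_configure_rx_ring - Configure a single Rx descriptor ring
 * @adapter: board private structure
 * @ring: ring to configure
 *
 * Disables the queue, programs the descriptor base, length, head and tail,
 * sets up SRRCTL/RSCCTL, re-enables the queue and refills its buffers.
 **/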
84418e3b AD |
2792 | void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, |
2793 | struct ixgbe_ring *ring) | |
acd37177 AD |
2794 | { |
2795 | struct ixgbe_hw *hw = &adapter->hw; | |
2796 | u64 rdba = ring->dma; | |
9e10e045 | 2797 | u32 rxdctl; |
acd37177 AD |
2798 | u16 reg_idx = ring->reg_idx; |
2799 | ||
9e10e045 AD |
2800 | /* disable queue to avoid issues while updating state */ |
2801 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | |
2802 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), | |
2803 | rxdctl & ~IXGBE_RXDCTL_ENABLE); | |
2804 | IXGBE_WRITE_FLUSH(hw); | |
2805 | ||
acd37177 AD |
2806 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); |
2807 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); | |
2808 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), | |
2809 | ring->count * sizeof(union ixgbe_adv_rx_desc)); | |
2810 | IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); | |
2811 | IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); | |
2812 | ring->head = IXGBE_RDH(reg_idx); | |
2813 | ring->tail = IXGBE_RDT(reg_idx); | |
9e10e045 AD |
2814 | |
2815 | ixgbe_configure_srrctl(adapter, ring); | |
2816 | ixgbe_configure_rscctl(adapter, ring); | |
2817 | ||
2818 | if (hw->mac.type == ixgbe_mac_82598EB) { | |
2819 | /* | |
2820 | * enable cache line friendly hardware writes: | |
2821 | * PTHRESH=32 descriptors (half the internal cache), | |
2822 | * this also removes ugly rx_no_buffer_count increment | |
2823 | * HTHRESH=4 descriptors (to minimize latency on fetch) | |
2824 | * WTHRESH=8 burst writeback up to two cache lines | |
2825 | */ | |
2826 | rxdctl &= ~0x3FFFFF; | |
2827 | rxdctl |= 0x080420; | |
2828 | } | |
2829 | ||
2830 | /* enable receive descriptor ring */ | |
2831 | rxdctl |= IXGBE_RXDCTL_ENABLE; | |
2832 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); | |
2833 | ||
2834 | ixgbe_rx_desc_queue_enable(adapter, ring); | |
2835 | ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); | |
acd37177 AD |
2836 | } |
2837 | ||
48654521 AD |
2838 | static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) |
2839 | { | |
2840 | struct ixgbe_hw *hw = &adapter->hw; | |
2841 | int p; | |
2842 | ||
2843 | /* PSRTYPE must be initialized in non 82598 adapters */ | |
2844 | u32 psrtype = IXGBE_PSRTYPE_TCPHDR | | |
e8e9f696 JP |
2845 | IXGBE_PSRTYPE_UDPHDR | |
2846 | IXGBE_PSRTYPE_IPV4HDR | | |
48654521 | 2847 | IXGBE_PSRTYPE_L2HDR | |
e8e9f696 | 2848 | IXGBE_PSRTYPE_IPV6HDR; |
48654521 AD |
2849 | |
2850 | if (hw->mac.type == ixgbe_mac_82598EB) | |
2851 | return; | |
2852 | ||
2853 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) | |
2854 | psrtype |= (adapter->num_rx_queues_per_pool << 29); | |
2855 | ||
2856 | for (p = 0; p < adapter->num_rx_pools; p++) | |
2857 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), | |
2858 | psrtype); | |
2859 | } | |
2860 | ||
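/**
 * ixgbe_configure_virtualization - Configure registers for SR-IOV operation
 * @adapter: board private structure
 *
 * Enables VMDq and replication in VT_CTL, enables Tx/Rx only for the PF's
 * pool, maps the PF MAC address to the pool following the VFs and selects
 * the VT mode in GCR_EXT.
 **/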
f5b4a52e AD |
2861 | static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) |
2862 | { | |
2863 | struct ixgbe_hw *hw = &adapter->hw; | |
2864 | u32 gcr_ext; | |
2865 | u32 vt_reg_bits; | |
2866 | u32 reg_offset, vf_shift; | |
2867 | u32 vmdctl; | |
2868 | ||
2869 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) | |
2870 | return; | |
2871 | ||
2872 | vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); | |
2873 | vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; | |
2874 | vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); | |
2875 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); | |
2876 | ||
2877 | vf_shift = adapter->num_vfs % 32; | |
2878 | reg_offset = (adapter->num_vfs >= 32) ? 1 : 0; | |
2879 | ||
2880 | /* Enable only the PF's pool for Tx/Rx */ | |
2881 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); | |
2882 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); | |
2883 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); | |
2884 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); | |
2885 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); | |
2886 | ||
2887 | /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ | |
2888 | hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); | |
2889 | ||
2890 | /* | |
2891 | * Set up VF register offsets for selected VT Mode, | |
2892 | * i.e. 32 or 64 VFs for SR-IOV | |
2893 | */ | |
2894 | gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); | |
2895 | gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; | |
2896 | gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; | |
2897 | IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); | |
2898 | ||
2899 | /* enable Tx loopback for VF/PF communication */ | |
2900 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); | |
2901 | } | |
2902 | ||
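/**
 * ixgbe_set_rx_buffer_len - Set Rx buffer size for all Rx rings
 * @adapter: board private structure
 *
 * Chooses between packet split and single buffer mode, programs the maximum
 * frame size (MHADD.MFS) and jumbo enable, and stores the resulting
 * rx_buf_len and flags in each Rx ring.
 **/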
477de6ed | 2903 | static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) |
9a799d71 | 2904 | { |
9a799d71 AK |
2905 | struct ixgbe_hw *hw = &adapter->hw; |
2906 | struct net_device *netdev = adapter->netdev; | |
2907 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | |
7c6e0a43 | 2908 | int rx_buf_len; |
477de6ed AD |
2909 | struct ixgbe_ring *rx_ring; |
2910 | int i; | |
2911 | u32 mhadd, hlreg0; | |
48654521 | 2912 | |
9a799d71 | 2913 | /* Decide whether to use packet split mode or not */ |
1cdd1ec8 GR |
2914 | /* Do not use packet split if we're in SR-IOV Mode */ |
2915 | if (!adapter->num_vfs) | |
2916 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | |
9a799d71 AK |
2917 | |
2918 | /* Set the RX buffer length according to the mode */ | |
2919 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | |
7c6e0a43 | 2920 | rx_buf_len = IXGBE_RX_HDR_SIZE; |
9a799d71 | 2921 | } else { |
0c19d6af | 2922 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && |
f8212f97 | 2923 | (netdev->mtu <= ETH_DATA_LEN)) |
7c6e0a43 | 2924 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
9a799d71 | 2925 | else |
477de6ed | 2926 | rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024); |
9a799d71 AK |
2927 | } |
2928 | ||
63f39bd1 | 2929 | #ifdef IXGBE_FCOE |
477de6ed AD |
2930 | /* adjust max frame to be able to do baby jumbo for FCoE */ |
2931 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | |
2932 | (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) | |
2933 | max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; | |
9a799d71 | 2934 | |
477de6ed AD |
2935 | #endif /* IXGBE_FCOE */ |
2936 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | |
2937 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { | |
2938 | mhadd &= ~IXGBE_MHADD_MFS_MASK; | |
2939 | mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; | |
2940 | ||
2941 | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); | |
2942 | } | |
2943 | ||
2944 | hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); | |
2945 | /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ | |
2946 | hlreg0 |= IXGBE_HLREG0_JUMBOEN; | |
2947 | IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); | |
9a799d71 | 2948 | |
0cefafad JB |
2949 | /* |
2950 | * Setup the HW Rx Head and Tail Descriptor Pointers and | |
2951 | * the Base and Length of the Rx Descriptor Ring | |
2952 | */ | |
9a799d71 | 2953 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4a0b9ca0 | 2954 | rx_ring = adapter->rx_ring[i]; |
a6616b42 | 2955 | rx_ring->rx_buf_len = rx_buf_len; |
cc41ac7c | 2956 | |
6e455b89 YZ |
2957 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) |
2958 | rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; | |
1b3ff02e PWJ |
2959 | else |
2960 | rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; | |
cc41ac7c | 2961 | |
63f39bd1 | 2962 | #ifdef IXGBE_FCOE |
e8e9f696 | 2963 | if (netdev->features & NETIF_F_FCOE_MTU) { |
63f39bd1 YZ |
2964 | struct ixgbe_ring_feature *f; |
2965 | f = &adapter->ring_feature[RING_F_FCOE]; | |
6e455b89 YZ |
2966 | if ((i >= f->mask) && (i < f->mask + f->indices)) { |
2967 | rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; | |
2968 | if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) | |
2969 | rx_ring->rx_buf_len = | |
e8e9f696 | 2970 | IXGBE_FCOE_JUMBO_FRAME_SIZE; |
6e455b89 | 2971 | } |
63f39bd1 | 2972 | } |
63f39bd1 | 2973 | #endif /* IXGBE_FCOE */ |
477de6ed AD |
2974 | } |
2975 | ||
2976 | } | |
2977 | ||
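/**
 * ixgbe_setup_rdrxctl - Program RDRXCTL based on MAC type
 * @adapter: board private structure
 *
 * 82598 needs MVMEN so SRRCTL can be programmed per queue; 82599 disables
 * RSC for ACK packets and sets the bits required for RSC and CRC stripping.
 **/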
7367096a AD |
2978 | static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) |
2979 | { | |
2980 | struct ixgbe_hw *hw = &adapter->hw; | |
2981 | u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); | |
2982 | ||
2983 | switch (hw->mac.type) { | |
2984 | case ixgbe_mac_82598EB: | |
2985 | /* | |
2986 | * For VMDq support of different descriptor types or | |
2987 | * buffer sizes through the use of multiple SRRCTL | |
2988 | * registers, RDRXCTL.MVMEN must be set to 1 | |
2989 | * | |
2990 | * also, the manual doesn't mention it clearly but DCA hints | |
2991 | * will only use queue 0's tags unless this bit is set. Side | |
2992 | * effects of setting this bit are only that SRRCTL must be | |
2993 | * fully programmed [0..15] | |
2994 | */ | |
2995 | rdrxctl |= IXGBE_RDRXCTL_MVMEN; | |
2996 | break; | |
2997 | case ixgbe_mac_82599EB: | |
2998 | /* Disable RSC for ACK packets */ | |
2999 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, | |
3000 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); | |
3001 | rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; | |
3002 | /* hardware requires some bits to be set by default */ | |
3003 | rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); | |
3004 | rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; | |
3005 | break; | |
3006 | default: | |
3007 | /* We should do nothing since we don't know this hardware */ | |
3008 | return; | |
3009 | } | |
3010 | ||
3011 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | |
3012 | } | |
3013 | ||
477de6ed AD |
3014 | /** |
3015 | * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset | |
3016 | * @adapter: board private structure | |
3017 | * | |
3018 | * Configure the Rx unit of the MAC after a reset. | |
3019 | **/ | |
3020 | static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |
3021 | { | |
3022 | struct ixgbe_hw *hw = &adapter->hw; | |
477de6ed AD |
3023 | int i; |
3024 | u32 rxctrl; | |
477de6ed AD |
3025 | |
3026 | /* disable receives while setting up the descriptors */ | |
3027 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | |
3028 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); | |
3029 | ||
3030 | ixgbe_setup_psrtype(adapter); | |
7367096a | 3031 | ixgbe_setup_rdrxctl(adapter); |
477de6ed | 3032 | |
9e10e045 | 3033 | /* Program registers for the distribution of queues */ |
f5b4a52e | 3034 | ixgbe_setup_mrqc(adapter); |
f5b4a52e | 3035 | |
9e10e045 AD |
3036 | ixgbe_set_uta(adapter); |
3037 | ||
477de6ed AD |
3038 | /* set_rx_buffer_len must be called before ring initialization */ |
3039 | ixgbe_set_rx_buffer_len(adapter); | |
3040 | ||
3041 | /* | |
3042 | * Setup the HW Rx Head and Tail Descriptor Pointers and | |
3043 | * the Base and Length of the Rx Descriptor Ring | |
3044 | */ | |
9e10e045 AD |
3045 | for (i = 0; i < adapter->num_rx_queues; i++) |
3046 | ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); | |
177db6ff | 3047 | |
9e10e045 AD |
3048 | /* disable drop enable for 82598 parts */ |
3049 | if (hw->mac.type == ixgbe_mac_82598EB) | |
3050 | rxctrl |= IXGBE_RXCTRL_DMBYPS; | |
3051 | ||
3052 | /* enable all receives */ | |
3053 | rxctrl |= IXGBE_RXCTRL_RXEN; | |
3054 | hw->mac.ops.enable_rx_dma(hw, rxctrl); | |
9a799d71 AK |
3055 | } |
3056 | ||
068c89b0 DS |
3057 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
3058 | { | |
3059 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
3060 | struct ixgbe_hw *hw = &adapter->hw; | |
1ada1b1b | 3061 | int pool_ndx = adapter->num_vfs; |
068c89b0 DS |
3062 | |
3063 | /* add VID to filter table */ | |
1ada1b1b | 3064 | hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); |
f62bbb5e | 3065 | set_bit(vid, adapter->active_vlans); |
068c89b0 DS |
3066 | } |
3067 | ||
3068 | static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |
3069 | { | |
3070 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
3071 | struct ixgbe_hw *hw = &adapter->hw; | |
1ada1b1b | 3072 | int pool_ndx = adapter->num_vfs; |
068c89b0 | 3073 | |
068c89b0 | 3074 | /* remove VID from filter table */ |
1ada1b1b | 3075 | hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); |
f62bbb5e | 3076 | clear_bit(vid, adapter->active_vlans); |
068c89b0 DS |
3077 | } |
3078 | ||
5f6c0181 JB |
3079 | /** |
3080 | * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering | |
3081 | * @adapter: driver data | |
3082 | */ | |
3083 | static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) | |
3084 | { | |
3085 | struct ixgbe_hw *hw = &adapter->hw; | |
f62bbb5e JG |
3086 | u32 vlnctrl; |
3087 | ||
3088 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | |
3089 | vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); | |
3090 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | |
3091 | } | |
3092 | ||
3093 | /** | |
3094 | * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering | |
3095 | * @adapter: driver data | |
3096 | */ | |
3097 | static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) | |
3098 | { | |
3099 | struct ixgbe_hw *hw = &adapter->hw; | |
3100 | u32 vlnctrl; | |
3101 | ||
3102 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | |
3103 | vlnctrl |= IXGBE_VLNCTRL_VFE; | |
3104 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | |
3105 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | |
3106 | } | |
3107 | ||
3108 | /** | |
3109 | * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping | |
3110 | * @adapter: driver data | |
3111 | */ | |
3112 | static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) | |
3113 | { | |
3114 | struct ixgbe_hw *hw = &adapter->hw; | |
3115 | u32 vlnctrl; | |
5f6c0181 JB |
3116 | int i, j; |
3117 | ||
3118 | switch (hw->mac.type) { | |
3119 | case ixgbe_mac_82598EB: | |
f62bbb5e JG |
3120 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
3121 | vlnctrl &= ~IXGBE_VLNCTRL_VME; | |
5f6c0181 JB |
3122 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
3123 | break; | |
3124 | case ixgbe_mac_82599EB: | |
5f6c0181 JB |
3125 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3126 | j = adapter->rx_ring[i]->reg_idx; | |
3127 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | |
3128 | vlnctrl &= ~IXGBE_RXDCTL_VME; | |
3129 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); | |
3130 | } | |
3131 | break; | |
3132 | default: | |
3133 | break; | |
3134 | } | |
3135 | } | |
3136 | ||
3137 | /** | |
f62bbb5e | 3138 | * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping |
5f6c0181 JB |
3139 | * @adapter: driver data |
3140 | */ | |
f62bbb5e | 3141 | static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) |
5f6c0181 JB |
3142 | { |
3143 | struct ixgbe_hw *hw = &adapter->hw; | |
f62bbb5e | 3144 | u32 vlnctrl; |
5f6c0181 JB |
3145 | int i, j; |
3146 | ||
3147 | switch (hw->mac.type) { | |
3148 | case ixgbe_mac_82598EB: | |
f62bbb5e JG |
3149 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
3150 | vlnctrl |= IXGBE_VLNCTRL_VME; | |
5f6c0181 JB |
3151 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
3152 | break; | |
3153 | case ixgbe_mac_82599EB: | |
5f6c0181 JB |
3154 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3155 | j = adapter->rx_ring[i]->reg_idx; | |
3156 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | |
3157 | vlnctrl |= IXGBE_RXDCTL_VME; | |
3158 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); | |
3159 | } | |
3160 | break; | |
3161 | default: | |
3162 | break; | |
3163 | } | |
3164 | } | |
3165 | ||
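/**
 * ixgbe_restore_vlan - Re-register active VLANs with the hardware
 * @adapter: board private structure
 *
 * Re-adds VLAN 0 and every VLAN ID set in active_vlans to the filter table,
 * typically after a reset.
 **/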
9a799d71 AK |
3166 | static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) |
3167 | { | |
f62bbb5e | 3168 | u16 vid; |
9a799d71 | 3169 | |
f62bbb5e JG |
3170 | ixgbe_vlan_rx_add_vid(adapter->netdev, 0); |
3171 | ||
3172 | for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) | |
3173 | ixgbe_vlan_rx_add_vid(adapter->netdev, vid); | |
9a799d71 AK |
3174 | } |
3175 | ||
2850062a AD |
3176 | /** |
3177 | * ixgbe_write_uc_addr_list - write unicast addresses to RAR table | |
3178 | * @netdev: network interface device structure | |
3179 | * | |
3180 | * Writes unicast address list to the RAR table. | |
3181 | * Returns: -ENOMEM on failure/insufficient address space | |
3182 | * 0 on no addresses written | |
3183 | * X on writing X addresses to the RAR table | |
3184 | **/ | |
3185 | static int ixgbe_write_uc_addr_list(struct net_device *netdev) | |
3186 | { | |
3187 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
3188 | struct ixgbe_hw *hw = &adapter->hw; | |
3189 | unsigned int vfn = adapter->num_vfs; | |
3190 | unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1); | |
3191 | int count = 0; | |
3192 | ||
3193 | /* return ENOMEM indicating insufficient memory for addresses */ | |
3194 | if (netdev_uc_count(netdev) > rar_entries) | |
3195 | return -ENOMEM; | |
3196 | ||
3197 | if (!netdev_uc_empty(netdev) && rar_entries) { | |
3198 | struct netdev_hw_addr *ha; | |
3199 | /* return error if we do not support writing to RAR table */ | |
3200 | if (!hw->mac.ops.set_rar) | |
3201 | return -ENOMEM; | |
3202 | ||
3203 | netdev_for_each_uc_addr(ha, netdev) { | |
3204 | if (!rar_entries) | |
3205 | break; | |
3206 | hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, | |
3207 | vfn, IXGBE_RAH_AV); | |
3208 | count++; | |
3209 | } | |
3210 | } | |
3211 | /* write the addresses in reverse order to avoid write combining */ | |
3212 | for (; rar_entries > 0 ; rar_entries--) | |
3213 | hw->mac.ops.clear_rar(hw, rar_entries); | |
3214 | ||
3215 | return count; | |
3216 | } | |
3217 | ||
9a799d71 | 3218 | /** |
2c5645cf | 3219 | * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set |
9a799d71 AK |
3220 | * @netdev: network interface device structure |
3221 | * | |
2c5645cf CL |
3222 | * The set_rx_method entry point is called whenever the unicast/multicast |
3223 | * address list or the network interface flags are updated. This routine is | |
3224 | * responsible for configuring the hardware for proper unicast, multicast and | |
3225 | * promiscuous mode. | |
9a799d71 | 3226 | **/ |
7f870475 | 3227 | void ixgbe_set_rx_mode(struct net_device *netdev) |
9a799d71 AK |
3228 | { |
3229 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
3230 | struct ixgbe_hw *hw = &adapter->hw; | |
2850062a AD |
3231 | u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; |
3232 | int count; | |
9a799d71 AK |
3233 | |
3234 | /* Check for Promiscuous and All Multicast modes */ | |
3235 | ||
3236 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | |
3237 | ||
f5dc442b AD |
3238 | /* set all bits that we expect to always be set */ |
3239 | fctrl |= IXGBE_FCTRL_BAM; | |
3240 | fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ | |
3241 | fctrl |= IXGBE_FCTRL_PMCF; | |
3242 | ||
2850062a AD |
3243 | /* clear the bits we are changing the status of */ |
3244 | fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | |
3245 | ||
9a799d71 | 3246 | if (netdev->flags & IFF_PROMISC) { |
e433ea1f | 3247 | hw->addr_ctrl.user_set_promisc = true; |
9a799d71 | 3248 | fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); |
2850062a | 3249 | vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); |
5f6c0181 JB |
3250 | /* don't filter VLANs in hardware while in promiscuous mode */ | |
3251 | ixgbe_vlan_filter_disable(adapter); | |
9a799d71 | 3252 | } else { |
746b9f02 PM |
3253 | if (netdev->flags & IFF_ALLMULTI) { |
3254 | fctrl |= IXGBE_FCTRL_MPE; | |
2850062a AD |
3255 | vmolr |= IXGBE_VMOLR_MPE; |
3256 | } else { | |
3257 | /* | |
3258 | * Write addresses to the MTA, if the attempt fails | |
3259 | * then we should just turn on promiscuous mode so | |
3260 | * that we can at least receive multicast traffic | |
3261 | */ | |
3262 | hw->mac.ops.update_mc_addr_list(hw, netdev); | |
3263 | vmolr |= IXGBE_VMOLR_ROMPE; | |
746b9f02 | 3264 | } |
5f6c0181 | 3265 | ixgbe_vlan_filter_enable(adapter); |
e433ea1f | 3266 | hw->addr_ctrl.user_set_promisc = false; |
2850062a AD |
3267 | /* |
3268 | * Write addresses to available RAR registers, if there is not | |
3269 | * sufficient space to store all the addresses then enable | |
3270 | * unicast promiscuous mode | |
3271 | */ | |
3272 | count = ixgbe_write_uc_addr_list(netdev); | |
3273 | if (count < 0) { | |
3274 | fctrl |= IXGBE_FCTRL_UPE; | |
3275 | vmolr |= IXGBE_VMOLR_ROPE; | |
3276 | } | |
9a799d71 AK |
3277 | } |
3278 | ||
2850062a | 3279 | if (adapter->num_vfs) { |
1cdd1ec8 | 3280 | ixgbe_restore_vf_multicasts(adapter); |
2850062a AD |
3281 | vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & |
3282 | ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | | |
3283 | IXGBE_VMOLR_ROPE); | |
3284 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); | |
3285 | } | |
3286 | ||
3287 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | |
f62bbb5e JG |
3288 | |
3289 | if (netdev->features & NETIF_F_HW_VLAN_RX) | |
3290 | ixgbe_vlan_strip_enable(adapter); | |
3291 | else | |
3292 | ixgbe_vlan_strip_disable(adapter); | |
9a799d71 AK |
3293 | } |
3294 | ||
021230d4 AV |
3295 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) |
3296 | { | |
3297 | int q_idx; | |
3298 | struct ixgbe_q_vector *q_vector; | |
3299 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | |
3300 | ||
3301 | /* legacy and MSI only use one vector */ | |
3302 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | |
3303 | q_vectors = 1; | |
3304 | ||
3305 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | |
f0848276 | 3306 | struct napi_struct *napi; |
7a921c93 | 3307 | q_vector = adapter->q_vector[q_idx]; |
f0848276 | 3308 | napi = &q_vector->napi; |
91281fd3 AD |
3309 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
3310 | if (!q_vector->rxr_count || !q_vector->txr_count) { | |
3311 | if (q_vector->txr_count == 1) | |
3312 | napi->poll = &ixgbe_clean_txonly; | |
3313 | else if (q_vector->rxr_count == 1) | |
3314 | napi->poll = &ixgbe_clean_rxonly; | |
3315 | } | |
3316 | } | |
f0848276 JB |
3317 | |
3318 | napi_enable(napi); | |
021230d4 AV |
3319 | } |
3320 | } | |
3321 | ||
3322 | static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) | |
3323 | { | |
3324 | int q_idx; | |
3325 | struct ixgbe_q_vector *q_vector; | |
3326 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | |
3327 | ||
3328 | /* legacy and MSI only use one vector */ | |
3329 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | |
3330 | q_vectors = 1; | |
3331 | ||
3332 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | |
7a921c93 | 3333 | q_vector = adapter->q_vector[q_idx]; |
021230d4 AV |
3334 | napi_disable(&q_vector->napi); |
3335 | } | |
3336 | } | |
3337 | ||
7a6b6f51 | 3338 | #ifdef CONFIG_IXGBE_DCB |
2f90b865 AD |
3339 | /* |
3340 | * ixgbe_configure_dcb - Configure DCB hardware | |
3341 | * @adapter: ixgbe adapter struct | |
3342 | * | |
3343 | * This is called by the driver on open to configure the DCB hardware. | |
3344 | * This is also called by the gennetlink interface when reconfiguring | |
3345 | * the DCB state. | |
3346 | */ | |
3347 | static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |
3348 | { | |
3349 | struct ixgbe_hw *hw = &adapter->hw; | |
5f6c0181 | 3350 | u32 txdctl; |
2f90b865 AD |
3351 | int i, j; |
3352 | ||
67ebd791 AD |
3353 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { |
3354 | if (hw->mac.type == ixgbe_mac_82598EB) | |
3355 | netif_set_gso_max_size(adapter->netdev, 65536); | |
3356 | return; | |
3357 | } | |
3358 | ||
3359 | if (hw->mac.type == ixgbe_mac_82598EB) | |
3360 | netif_set_gso_max_size(adapter->netdev, 32768); | |
3361 | ||
2f90b865 AD |
3362 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); |
3363 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); | |
3364 | ||
3365 | /* reconfigure the hardware */ | |
3366 | ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); | |
3367 | ||
3368 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
4a0b9ca0 | 3369 | j = adapter->tx_ring[i]->reg_idx; |
2f90b865 AD |
3370 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); |
3371 | /* PThresh workaround for Tx hang with DFP enabled. */ | |
3372 | txdctl |= 32; | |
3373 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); | |
3374 | } | |
3375 | /* Enable VLAN tag insert/strip */ | |
f62bbb5e | 3376 | adapter->netdev->features |= NETIF_F_HW_VLAN_RX; |
5f6c0181 | 3377 | |
2f90b865 AD |
3378 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); |
3379 | } | |
3380 | ||
3381 | #endif | |
9a799d71 AK |
3382 | static void ixgbe_configure(struct ixgbe_adapter *adapter) |
3383 | { | |
3384 | struct net_device *netdev = adapter->netdev; | |
c4cf55e5 | 3385 | struct ixgbe_hw *hw = &adapter->hw; |
9a799d71 AK |
3386 | int i; |
3387 | ||
7a6b6f51 | 3388 | #ifdef CONFIG_IXGBE_DCB |
67ebd791 | 3389 | ixgbe_configure_dcb(adapter); |
2f90b865 | 3390 | #endif |
9a799d71 | 3391 | |
f62bbb5e JG |
3392 | ixgbe_set_rx_mode(netdev); |
3393 | ixgbe_restore_vlan(adapter); | |
3394 | ||
eacd73f7 YZ |
3395 | #ifdef IXGBE_FCOE |
3396 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | |
3397 | ixgbe_configure_fcoe(adapter); | |
3398 | ||
3399 | #endif /* IXGBE_FCOE */ | |
c4cf55e5 PWJ |
3400 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
3401 | for (i = 0; i < adapter->num_tx_queues; i++) | |
4a0b9ca0 | 3402 | adapter->tx_ring[i]->atr_sample_rate = |
e8e9f696 | 3403 | adapter->atr_sample_rate; |
c4cf55e5 PWJ |
3404 | ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); |
3405 | } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { | |
3406 | ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); | |
3407 | } | |
933d41f1 | 3408 | ixgbe_configure_virtualization(adapter); |
c4cf55e5 | 3409 | |
9a799d71 AK |
3410 | ixgbe_configure_tx(adapter); |
3411 | ixgbe_configure_rx(adapter); | |
9a799d71 AK |
3412 | } |
3413 | ||
e8e26350 PW |
3414 | static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) |
3415 | { | |
3416 | switch (hw->phy.type) { | |
3417 | case ixgbe_phy_sfp_avago: | |
3418 | case ixgbe_phy_sfp_ftl: | |
3419 | case ixgbe_phy_sfp_intel: | |
3420 | case ixgbe_phy_sfp_unknown: | |
ea0a04df DS |
3421 | case ixgbe_phy_sfp_passive_tyco: |
3422 | case ixgbe_phy_sfp_passive_unknown: | |
3423 | case ixgbe_phy_sfp_active_unknown: | |
3424 | case ixgbe_phy_sfp_ftl_active: | |
e8e26350 PW |
3425 | return true; |
3426 | default: | |
3427 | return false; | |
3428 | } | |
3429 | } | |
3430 | ||
0ecc061d | 3431 | /** |
e8e26350 PW |
3432 | * ixgbe_sfp_link_config - set up SFP+ link |
3433 | * @adapter: pointer to private adapter struct | |
3434 | **/ | |
3435 | static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) | |
3436 | { | |
3437 | struct ixgbe_hw *hw = &adapter->hw; | |
3438 | ||
3439 | if (hw->phy.multispeed_fiber) { | |
3440 | /* | |
3441 | * In multispeed fiber setups, the device may not have | |
3442 | * had a physical connection when the driver loaded. | |
3443 | * If that's the case, the initial link configuration | |
3444 | * couldn't get the MAC into 10G or 1G mode, so we'll | |
3445 | * never have a link status change interrupt fire. | |
3446 | * We need to try and force an autonegotiation | |
3447 | * session, then bring up link. | |
3448 | */ | |
3449 | hw->mac.ops.setup_sfp(hw); | |
3450 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | |
3451 | schedule_work(&adapter->multispeed_fiber_task); | |
3452 | } else { | |
3453 | /* | |
3454 | * Direct Attach Cu and non-multispeed fiber modules | |
3455 | * still need to be configured properly prior to | |
3456 | * attempting link. | |
3457 | */ | |
3458 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK)) | |
3459 | schedule_work(&adapter->sfp_config_module_task); | |
3460 | } | |
3461 | } | |
3462 | ||
3463 | /** | |
3464 | * ixgbe_non_sfp_link_config - set up non-SFP+ link | |
0ecc061d PWJ |
3465 | * @hw: pointer to private hardware struct |
3466 | * | |
3467 | * Returns 0 on success, negative on failure | |
3468 | **/ | |
e8e26350 | 3469 | static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) |
0ecc061d PWJ |
3470 | { |
3471 | u32 autoneg; | |
8620a103 | 3472 | bool negotiation, link_up = false; |
0ecc061d PWJ |
3473 | u32 ret = IXGBE_ERR_LINK_SETUP; |
3474 | ||
3475 | if (hw->mac.ops.check_link) | |
3476 | ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); | |
3477 | ||
3478 | if (ret) | |
3479 | goto link_cfg_out; | |
3480 | ||
3481 | if (hw->mac.ops.get_link_capabilities) | |
e8e9f696 JP |
3482 | ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, |
3483 | &negotiation); | |
0ecc061d PWJ |
3484 | if (ret) |
3485 | goto link_cfg_out; | |
3486 | ||
8620a103 MC |
3487 | if (hw->mac.ops.setup_link) |
3488 | ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); | |
0ecc061d PWJ |
3489 | link_cfg_out: |
3490 | return ret; | |
3491 | } | |
3492 | ||
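/**
 * ixgbe_setup_gpie - Program the General Purpose Interrupt Enable register
 * @adapter: board private structure
 *
 * Selects MSI-X or legacy EIAM auto-mask behaviour, sets the VT mode when
 * SR-IOV is enabled and enables the SDP pin interrupts (fan failure and the
 * additional SDP pins on 82599).
 **/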
a34bcfff | 3493 | static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) |
9a799d71 | 3494 | { |
9a799d71 | 3495 | struct ixgbe_hw *hw = &adapter->hw; |
a34bcfff | 3496 | u32 gpie = 0; |
9a799d71 | 3497 | |
9b471446 | 3498 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
a34bcfff AD |
3499 | gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | |
3500 | IXGBE_GPIE_OCD; | |
3501 | gpie |= IXGBE_GPIE_EIAME; | |
9b471446 JB |
3502 | /* |
3503 | * use EIAM to auto-mask when MSI-X interrupt is asserted | |
3504 | * this saves a register write for every interrupt | |
3505 | */ | |
3506 | switch (hw->mac.type) { | |
3507 | case ixgbe_mac_82598EB: | |
3508 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | |
3509 | break; | |
3510 | default: | |
3511 | case ixgbe_mac_82599EB: | |
3512 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); | |
3513 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); | |
3514 | break; | |
3515 | } | |
3516 | } else { | |
021230d4 AV |
3517 | /* legacy interrupts, use EIAM to auto-mask when reading EICR, |
3518 | * specifically only auto mask tx and rx interrupts */ | |
3519 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | |
3520 | } | |
9a799d71 | 3521 | |
a34bcfff AD |
3522 | /* XXX: to interrupt immediately for EICS writes, enable this */ |
3523 | /* gpie |= IXGBE_GPIE_EIMEN; */ | |
3524 | ||
3525 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | |
3526 | gpie &= ~IXGBE_GPIE_VTMODE_MASK; | |
3527 | gpie |= IXGBE_GPIE_VTMODE_64; | |
119fc60a MC |
3528 | } |
3529 | ||
a34bcfff AD |
3530 | /* Enable fan failure interrupt */ |
3531 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) | |
0befdb3e | 3532 | gpie |= IXGBE_SDP1_GPIEN; |
0befdb3e | 3533 | |
a34bcfff | 3534 | if (hw->mac.type == ixgbe_mac_82599EB) |
e8e26350 PW |
3535 | gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; | |
a34bcfff AD |
3537 | |
3538 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | |
3539 | } | |
3540 | ||
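/**
 * ixgbe_up_complete - Finish bringing the interface up after configuration
 * @adapter: board private structure
 *
 * Programs the interrupt mode, enables the optics and NAPI, handles SFP+
 * module identification, starts the Tx queues and kicks the watchdog so the
 * link state gets picked up.
 **/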
3541 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |
3542 | { | |
3543 | struct ixgbe_hw *hw = &adapter->hw; | |
a34bcfff | 3544 | int err; |
a34bcfff AD |
3545 | u32 ctrl_ext; |
3546 | ||
3547 | ixgbe_get_hw_control(adapter); | |
3548 | ixgbe_setup_gpie(adapter); | |
e8e26350 | 3549 | |
9a799d71 AK |
3550 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
3551 | ixgbe_configure_msix(adapter); | |
3552 | else | |
3553 | ixgbe_configure_msi_and_legacy(adapter); | |
3554 | ||
61fac744 PW |
3555 | /* enable the optics */ |
3556 | if (hw->phy.multispeed_fiber) | |
3557 | hw->mac.ops.enable_tx_laser(hw); | |
3558 | ||
9a799d71 | 3559 | clear_bit(__IXGBE_DOWN, &adapter->state); |
021230d4 AV |
3560 | ixgbe_napi_enable_all(adapter); |
3561 | ||
3562 | /* clear any pending interrupts, may auto mask */ | |
3563 | IXGBE_READ_REG(hw, IXGBE_EICR); | |
6af3b9eb | 3564 | ixgbe_irq_enable(adapter, true, true); |
9a799d71 | 3565 | |
bf069c97 DS |
3566 | /* |
3567 | * If this adapter has a fan, check to see if we had a failure | |
3568 | * before we enabled the interrupt. | |
3569 | */ | |
3570 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | |
3571 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | |
3572 | if (esdp & IXGBE_ESDP_SDP1) | |
396e799c | 3573 | e_crit(drv, "Fan has stopped, replace the adapter\n"); |
bf069c97 DS |
3574 | } |
3575 | ||
e8e26350 PW |
3576 | /* |
3577 | * For hot-pluggable SFP+ devices, a new SFP+ module may have | |
19343de2 DS |
3578 | * arrived before interrupts were enabled but after probe. Such |
3579 | * devices wouldn't have their type identified yet. We need to | |
3580 | * kick off the SFP+ module setup first, then try to bring up link. | |
e8e26350 PW |
3581 | * If we're not hot-pluggable SFP+, we just need to configure link |
3582 | * and bring it up. | |
3583 | */ | |
19343de2 DS |
3584 | if (hw->phy.type == ixgbe_phy_unknown) { |
3585 | err = hw->phy.ops.identify(hw); | |
3586 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | |
5da43c1a DS |
3587 | /* |
3588 | * Take the device down and schedule the sfp tasklet | |
3589 | * which will unregister_netdev and log it. | |
3590 | */ | |
19343de2 | 3591 | ixgbe_down(adapter); |
5da43c1a | 3592 | schedule_work(&adapter->sfp_config_module_task); |
19343de2 DS |
3593 | return err; |
3594 | } | |
e8e26350 PW |
3595 | } |
3596 | ||
3597 | if (ixgbe_is_sfp(hw)) { | |
3598 | ixgbe_sfp_link_config(adapter); | |
3599 | } else { | |
3600 | err = ixgbe_non_sfp_link_config(hw); | |
3601 | if (err) | |
396e799c | 3602 | e_err(probe, "link_config FAILED %d\n", err); |
e8e26350 | 3603 | } |
0ecc061d | 3604 | |
1da100bb | 3605 | /* enable transmits */ |
477de6ed | 3606 | netif_tx_start_all_queues(adapter->netdev); |
1da100bb | 3607 | |
9a799d71 AK |
3608 | /* bring the link up in the watchdog, this could race with our first |
3609 | * link up interrupt but shouldn't be a problem */ | |
cf8280ee JB |
3610 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
3611 | adapter->link_check_timeout = jiffies; | |
9a799d71 | 3612 | mod_timer(&adapter->watchdog_timer, jiffies); |
c9205697 GR |
3613 | |
3614 | /* Set PF Reset Done bit so PF/VF Mail Ops can work */ | |
3615 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | |
3616 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; | |
3617 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); | |
3618 | ||
9a799d71 AK |
3619 | return 0; |
3620 | } | |
3621 | ||
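/**
 * ixgbe_reinit_locked - bring the interface down and back up
 * @adapter: board private structure
 *
 * Serializes resets with the __IXGBE_RESETTING bit and, when SR-IOV is
 * enabled, waits before coming back up so the VFs can notice the reset.
 **/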
d4f80882 AV |
3622 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) |
3623 | { | |
3624 | WARN_ON(in_interrupt()); | |
3625 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) | |
3626 | msleep(1); | |
3627 | ixgbe_down(adapter); | |
5809a1ae GR |
3628 | /* |
3629 | * If SR-IOV enabled then wait a bit before bringing the adapter | |
3630 | * back up to give the VFs time to respond to the reset. The | |
3631 | * two second wait is based upon the watchdog timer cycle in | |
3632 | * the VF driver. | |
3633 | */ | |
3634 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | |
3635 | msleep(2000); | |
d4f80882 AV |
3636 | ixgbe_up(adapter); |
3637 | clear_bit(__IXGBE_RESETTING, &adapter->state); | |
3638 | } | |
3639 | ||
9a799d71 AK |
3640 | int ixgbe_up(struct ixgbe_adapter *adapter) |
3641 | { | |
3642 | /* hardware has been reset, we need to reload some things */ | |
3643 | ixgbe_configure(adapter); | |
3644 | ||
3645 | return ixgbe_up_complete(adapter); | |
3646 | } | |
3647 | ||
3648 | void ixgbe_reset(struct ixgbe_adapter *adapter) | |
3649 | { | |
c44ade9e | 3650 | struct ixgbe_hw *hw = &adapter->hw; |
8ca783ab DS |
3651 | int err; |
3652 | ||
3653 | err = hw->mac.ops.init_hw(hw); | |
da4dd0f7 PWJ |
3654 | switch (err) { |
3655 | case 0: | |
3656 | case IXGBE_ERR_SFP_NOT_PRESENT: | |
3657 | break; | |
3658 | case IXGBE_ERR_MASTER_REQUESTS_PENDING: | |
849c4542 | 3659 | e_dev_err("master disable timed out\n"); |
da4dd0f7 | 3660 | break; |
794caeb2 PWJ |
3661 | case IXGBE_ERR_EEPROM_VERSION: |
3662 | /* We are running on a pre-production device, log a warning */ | |
849c4542 ET |
3663 | e_dev_warn("This device is a pre-production adapter/LOM. " |
3664 | "Please be aware there may be issues associated with " | |
3665 | "your hardware. If you are experiencing problems " | |
3666 | "please contact your Intel or hardware " | |
3667 | "representative who provided you with this " | |
3668 | "hardware.\n"); | |
794caeb2 | 3669 | break; |
da4dd0f7 | 3670 | default: |
849c4542 | 3671 | e_dev_err("Hardware Error: %d\n", err); |
da4dd0f7 | 3672 | } |
9a799d71 AK |
3673 | |
3674 | /* reprogram the RAR[0] in case user changed it. */ | |
1cdd1ec8 GR |
3675 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, |
3676 | IXGBE_RAH_AV); | |
9a799d71 AK |
3677 | } |
3678 | ||
9a799d71 AK |
3679 | /** |
3680 | * ixgbe_clean_rx_ring - Free Rx Buffers per Queue | |
3681 | * @adapter: board private structure | |
3682 | * @rx_ring: ring to free buffers from | |
3683 | **/ | |
3684 | static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |
e8e9f696 | 3685 | struct ixgbe_ring *rx_ring) |
9a799d71 AK |
3686 | { |
3687 | struct pci_dev *pdev = adapter->pdev; | |
3688 | unsigned long size; | |
3689 | unsigned int i; | |
3690 | ||
84418e3b AD |
3691 | /* ring already cleared, nothing to do */ |
3692 | if (!rx_ring->rx_buffer_info) | |
3693 | return; | |
9a799d71 | 3694 | |
84418e3b | 3695 | /* Free all the Rx ring sk_buffs */ |
9a799d71 AK |
3696 | for (i = 0; i < rx_ring->count; i++) { |
3697 | struct ixgbe_rx_buffer *rx_buffer_info; | |
3698 | ||
3699 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | |
3700 | if (rx_buffer_info->dma) { | |
1b507730 | 3701 | dma_unmap_single(&pdev->dev, rx_buffer_info->dma, |
e8e9f696 | 3702 | rx_ring->rx_buf_len, |
1b507730 | 3703 | DMA_FROM_DEVICE); |
9a799d71 AK |
3704 | rx_buffer_info->dma = 0; |
3705 | } | |
3706 | if (rx_buffer_info->skb) { | |
f8212f97 | 3707 | struct sk_buff *skb = rx_buffer_info->skb; |
9a799d71 | 3708 | rx_buffer_info->skb = NULL; |
f8212f97 AD |
3709 | do { |
3710 | struct sk_buff *this = skb; | |
e8171aaa | 3711 | if (IXGBE_RSC_CB(this)->delay_unmap) { |
1b507730 NN |
3712 | dma_unmap_single(&pdev->dev, |
3713 | IXGBE_RSC_CB(this)->dma, | |
e8e9f696 | 3714 | rx_ring->rx_buf_len, |
1b507730 | 3715 | DMA_FROM_DEVICE); |
fd3686a8 | 3716 | IXGBE_RSC_CB(this)->dma = 0; |
e8171aaa | 3717 | IXGBE_RSC_CB(skb)->delay_unmap = false; |
fd3686a8 | 3718 | } |
f8212f97 AD |
3719 | skb = skb->prev; |
3720 | dev_kfree_skb(this); | |
3721 | } while (skb); | |
9a799d71 AK |
3722 | } |
3723 | if (!rx_buffer_info->page) | |
3724 | continue; | |
4f57ca6e | 3725 | if (rx_buffer_info->page_dma) { |
1b507730 NN |
3726 | dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, |
3727 | PAGE_SIZE / 2, DMA_FROM_DEVICE); | |
4f57ca6e JB |
3728 | rx_buffer_info->page_dma = 0; |
3729 | } | |
9a799d71 AK |
3730 | put_page(rx_buffer_info->page); |
3731 | rx_buffer_info->page = NULL; | |
762f4c57 | 3732 | rx_buffer_info->page_offset = 0; |
9a799d71 AK |
3733 | } |
3734 | ||
3735 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; | |
3736 | memset(rx_ring->rx_buffer_info, 0, size); | |
3737 | ||
3738 | /* Zero out the descriptor ring */ | |
3739 | memset(rx_ring->desc, 0, rx_ring->size); | |
3740 | ||
3741 | rx_ring->next_to_clean = 0; | |
3742 | rx_ring->next_to_use = 0; | |
3743 | ||
9891ca7c JB |
3744 | if (rx_ring->head) |
3745 | writel(0, adapter->hw.hw_addr + rx_ring->head); | |
3746 | if (rx_ring->tail) | |
3747 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | |
9a799d71 AK |
3748 | } |
3749 | ||
3750 | /** | |
3751 | * ixgbe_clean_tx_ring - Free Tx Buffers | |
3752 | * @adapter: board private structure | |
3753 | * @tx_ring: ring to be cleaned | |
3754 | **/ | |
3755 | static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, | |
e8e9f696 | 3756 | struct ixgbe_ring *tx_ring) |
9a799d71 AK |
3757 | { |
3758 | struct ixgbe_tx_buffer *tx_buffer_info; | |
3759 | unsigned long size; | |
3760 | unsigned int i; | |
3761 | ||
84418e3b AD |
3762 | /* ring already cleared, nothing to do */ |
3763 | if (!tx_ring->tx_buffer_info) | |
3764 | return; | |
9a799d71 | 3765 | |
84418e3b | 3766 | /* Free all the Tx ring sk_buffs */ |
9a799d71 AK |
3767 | for (i = 0; i < tx_ring->count; i++) { |
3768 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | |
3769 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | |
3770 | } | |
3771 | ||
3772 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; | |
3773 | memset(tx_ring->tx_buffer_info, 0, size); | |
3774 | ||
3775 | /* Zero out the descriptor ring */ | |
3776 | memset(tx_ring->desc, 0, tx_ring->size); | |
3777 | ||
3778 | tx_ring->next_to_use = 0; | |
3779 | tx_ring->next_to_clean = 0; | |
3780 | ||
9891ca7c JB |
3781 | if (tx_ring->head) |
3782 | writel(0, adapter->hw.hw_addr + tx_ring->head); | |
3783 | if (tx_ring->tail) | |
3784 | writel(0, adapter->hw.hw_addr + tx_ring->tail); | |
9a799d71 AK |
3785 | } |
3786 | ||
3787 | /** | |
021230d4 | 3788 | * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues |
9a799d71 AK |
3789 | * @adapter: board private structure |
3790 | **/ | |
021230d4 | 3791 | static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) |
9a799d71 AK |
3792 | { |
3793 | int i; | |
3794 | ||
021230d4 | 3795 | for (i = 0; i < adapter->num_rx_queues; i++) |
4a0b9ca0 | 3796 | ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); |
9a799d71 AK |
3797 | } |
3798 | ||
3799 | /** | |
021230d4 | 3800 | * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues |
9a799d71 AK |
3801 | * @adapter: board private structure |
3802 | **/ | |
021230d4 | 3803 | static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) |
9a799d71 AK |
3804 | { |
3805 | int i; | |
3806 | ||
021230d4 | 3807 | for (i = 0; i < adapter->num_tx_queues; i++) |
4a0b9ca0 | 3808 | ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); |
9a799d71 AK |
3809 | } |
3810 | ||
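/**
 * ixgbe_down - Quiesce the adapter
 * @adapter: board private structure
 *
 * Stops receives and transmits, disables interrupts, NAPI and timers,
 * powers down the optics, resets the hardware (unless the PCI channel is
 * offline) and frees all Tx/Rx ring buffers.
 **/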
3811 | void ixgbe_down(struct ixgbe_adapter *adapter) | |
3812 | { | |
3813 | struct net_device *netdev = adapter->netdev; | |
7f821875 | 3814 | struct ixgbe_hw *hw = &adapter->hw; |
9a799d71 | 3815 | u32 rxctrl; |
7f821875 JB |
3816 | u32 txdctl; |
3817 | int i, j; | |
b25ebfd2 | 3818 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
9a799d71 AK |
3819 | |
3820 | /* signal that we are down to the interrupt handler */ | |
3821 | set_bit(__IXGBE_DOWN, &adapter->state); | |
3822 | ||
767081ad GR |
3823 | /* disable receive for all VFs and wait one second */ |
3824 | if (adapter->num_vfs) { | |
767081ad GR |
3825 | /* ping all the active vfs to let them know we are going down */ |
3826 | ixgbe_ping_all_vfs(adapter); | |
581d1aa7 | 3827 | |
767081ad GR |
3828 | /* Disable all VFTE/VFRE TX/RX */ |
3829 | ixgbe_disable_tx_rx(adapter); | |
581d1aa7 GR |
3830 | |
3831 | /* Mark all the VFs as inactive */ | |
3832 | for (i = 0 ; i < adapter->num_vfs; i++) | |
3833 | adapter->vfinfo[i].clear_to_send = 0; | |
767081ad GR |
3834 | } |
3835 | ||
9a799d71 | 3836 | /* disable receives */ |
7f821875 JB |
3837 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
3838 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); | |
9a799d71 | 3839 | |
7f821875 | 3840 | IXGBE_WRITE_FLUSH(hw); |
9a799d71 AK |
3841 | msleep(10); |
3842 | ||
7f821875 JB |
3843 | netif_tx_stop_all_queues(netdev); |
3844 | ||
0a1f87cb DS |
3845 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); |
3846 | del_timer_sync(&adapter->sfp_timer); | |
9a799d71 | 3847 | del_timer_sync(&adapter->watchdog_timer); |
cf8280ee | 3848 | cancel_work_sync(&adapter->watchdog_task); |
9a799d71 | 3849 | |
c0dfb90e JF |
3850 | netif_carrier_off(netdev); |
3851 | netif_tx_disable(netdev); | |
3852 | ||
3853 | ixgbe_irq_disable(adapter); | |
3854 | ||
3855 | ixgbe_napi_disable_all(adapter); | |
3856 | ||
b25ebfd2 PW |
3857 | /* Cleanup the affinity_hint CPU mask memory and callback */ |
3858 | for (i = 0; i < num_q_vectors; i++) { | |
3859 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | |
3860 | /* clear the affinity_mask in the IRQ descriptor */ | |
3861 | irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL); | |
3862 | /* release the CPU mask memory */ | |
3863 | free_cpumask_var(q_vector->affinity_mask); | |
3864 | } | |
3865 | ||
c4cf55e5 PWJ |
3866 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
3867 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | |
3868 | cancel_work_sync(&adapter->fdir_reinit_task); | |
3869 | ||
119fc60a MC |
3870 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) |
3871 | cancel_work_sync(&adapter->check_overtemp_task); | |
3872 | ||
7f821875 JB |
3873 | /* disable transmits in the hardware now that interrupts are off */ |
3874 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
4a0b9ca0 | 3875 | j = adapter->tx_ring[i]->reg_idx; |
7f821875 JB |
3876 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); |
3877 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), | |
e8e9f696 | 3878 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); |
7f821875 | 3879 | } |
88512539 PW |
3880 | /* Disable the Tx DMA engine on 82599 */ |
3881 | if (hw->mac.type == ixgbe_mac_82599EB) | |
3882 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, | |
e8e9f696 JP |
3883 | (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & |
3884 | ~IXGBE_DMATXCTL_TE)); | |
7f821875 | 3885 | |
9f756f01 JF |
3886 | /* power down the optics */ |
3887 | if (hw->phy.multispeed_fiber) | |
3888 | hw->mac.ops.disable_tx_laser(hw); | |
3889 | ||
9a713e7c PW |
3890 | /* clear n-tuple filters that are cached */ |
3891 | ethtool_ntuple_flush(netdev); | |
3892 | ||
6f4a0e45 PL |
3893 | if (!pci_channel_offline(adapter->pdev)) |
3894 | ixgbe_reset(adapter); | |
9a799d71 AK |
3895 | ixgbe_clean_all_tx_rings(adapter); |
3896 | ixgbe_clean_all_rx_rings(adapter); | |
3897 | ||
5dd2d332 | 3898 | #ifdef CONFIG_IXGBE_DCA |
96b0e0f6 | 3899 | /* since we reset the hardware DCA settings were cleared */ |
e35ec126 | 3900 | ixgbe_setup_dca(adapter); |
96b0e0f6 | 3901 | #endif |
9a799d71 AK |
3902 | } |
3903 | ||
9a799d71 | 3904 | /** |
021230d4 AV |
3905 | * ixgbe_poll - NAPI Rx polling callback |
3906 | * @napi: structure for representing this polling device | |
3907 | * @budget: how many packets driver is allowed to clean | |
3908 | * | |
3909 | * This function is the NAPI poll routine used for legacy and MSI interrupts | |
9a799d71 | 3910 | **/ |
021230d4 | 3911 | static int ixgbe_poll(struct napi_struct *napi, int budget) |
9a799d71 | 3912 | { |
9a1a69ad | 3913 | struct ixgbe_q_vector *q_vector = |
e8e9f696 | 3914 | container_of(napi, struct ixgbe_q_vector, napi); |
021230d4 | 3915 | struct ixgbe_adapter *adapter = q_vector->adapter; |
9a1a69ad | 3916 | int tx_clean_complete, work_done = 0; |
9a799d71 | 3917 | |
5dd2d332 | 3918 | #ifdef CONFIG_IXGBE_DCA |
bd0362dd | 3919 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
4a0b9ca0 PW |
3920 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); |
3921 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]); | |
bd0362dd JC |
3922 | } |
3923 | #endif | |
3924 | ||
4a0b9ca0 PW |
3925 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); |
3926 | ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget); | |
9a799d71 | 3927 | |
9a1a69ad | 3928 | if (!tx_clean_complete) |
d2c7ddd6 DM |
3929 | work_done = budget; |
3930 | ||
53e52c72 DM |
3931 | /* If budget not fully consumed, exit the polling mode */ |
3932 | if (work_done < budget) { | |
288379f0 | 3933 | napi_complete(napi); |
f7554a2b | 3934 | if (adapter->rx_itr_setting & 1) |
f494e8fa | 3935 | ixgbe_set_itr(adapter); |
d4f80882 | 3936 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
835462fc | 3937 | ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); |
9a799d71 | 3938 | } |
9a799d71 AK |
3939 | return work_done; |
3940 | } | |
3941 | ||
3942 | /** | |
3943 | * ixgbe_tx_timeout - Respond to a Tx Hang | |
3944 | * @netdev: network interface device structure | |
3945 | **/ | |
3946 | static void ixgbe_tx_timeout(struct net_device *netdev) | |
3947 | { | |
3948 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
3949 | ||
3950 | /* Do the reset outside of interrupt context */ | |
3951 | schedule_work(&adapter->reset_task); | |
3952 | } | |
3953 | ||
3954 | static void ixgbe_reset_task(struct work_struct *work) | |
3955 | { | |
3956 | struct ixgbe_adapter *adapter; | |
3957 | adapter = container_of(work, struct ixgbe_adapter, reset_task); | |
3958 | ||
2f90b865 AD |
3959 | /* If we're already down or resetting, just bail */ |
3960 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | |
3961 | test_bit(__IXGBE_RESETTING, &adapter->state)) | |
3962 | return; | |
3963 | ||
9a799d71 AK |
3964 | adapter->tx_timeout_count++; |
3965 | ||
dcd79aeb TI |
3966 | ixgbe_dump(adapter); |
3967 | netdev_err(adapter->netdev, "Reset adapter\n"); | |
d4f80882 | 3968 | ixgbe_reinit_locked(adapter); |
9a799d71 AK |
3969 | } |
3970 | ||
bc97114d PWJ |
3971 | #ifdef CONFIG_IXGBE_DCB |
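/**
 * ixgbe_set_dcb_queues: Allocate queues for DCB
 * @adapter: board private structure to initialize
 *
 * When DCB is enabled, use the indices from the DCB ring feature for both
 * the Rx and Tx queue counts.
 **/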
3972 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | |
b9804972 | 3973 | { |
bc97114d | 3974 | bool ret = false; |
0cefafad | 3975 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; |
b9804972 | 3976 | |
0cefafad JB |
3977 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) |
3978 | return ret; | |
3979 | ||
3980 | f->mask = 0x7 << 3; | |
3981 | adapter->num_rx_queues = f->indices; | |
3982 | adapter->num_tx_queues = f->indices; | |
3983 | ret = true; | |
2f90b865 | 3984 | |
bc97114d PWJ |
3985 | return ret; |
3986 | } | |
3987 | #endif | |
3988 | ||
4df10466 JB |
3989 | /** |
3990 | * ixgbe_set_rss_queues: Allocate queues for RSS | |
3991 | * @adapter: board private structure to initialize | |
3992 | * | |
3993 | * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try | |
3994 | * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. | |
3995 | * | |
3996 | **/ | |
bc97114d PWJ |
3997 | static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) |
3998 | { | |
3999 | bool ret = false; | |
0cefafad | 4000 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; |
bc97114d PWJ |
4001 | |
4002 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | |
0cefafad JB |
4003 | f->mask = 0xF; |
4004 | adapter->num_rx_queues = f->indices; | |
4005 | adapter->num_tx_queues = f->indices; | |
bc97114d PWJ |
4006 | ret = true; |
4007 | } else { | |
bc97114d | 4008 | ret = false; |
b9804972 JB |
4009 | } |
4010 | ||
bc97114d PWJ |
4011 | return ret; |
4012 | } | |
4013 | ||
c4cf55e5 PWJ |
4014 | /** |
4015 | * ixgbe_set_fdir_queues: Allocate queues for Flow Director | |
4016 | * @adapter: board private structure to initialize | |
4017 | * | |
4018 | * Flow Director is an advanced Rx filter, attempting to get Rx flows back | |
4019 | * to the original CPU that initiated the Tx session. This runs in addition | |
4020 | * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the | |
4021 | * Rx load across CPUs using RSS. | |
4022 | * | |
4023 | **/ | |
e8e9f696 | 4024 | static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) |
c4cf55e5 PWJ |
4025 | { |
4026 | bool ret = false; | |
4027 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; | |
4028 | ||
4029 | f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); | |
4030 | f_fdir->mask = 0; | |
4031 | ||
4032 | /* Flow Director must have RSS enabled */ | |
4033 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && | |
4034 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | |
4035 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) { | |
4036 | adapter->num_tx_queues = f_fdir->indices; | |
4037 | adapter->num_rx_queues = f_fdir->indices; | |
4038 | ret = true; | |
4039 | } else { | |
4040 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | |
4041 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | |
4042 | } | |
4043 | return ret; | |
4044 | } | |
4045 | ||
0331a832 YZ |
4046 | #ifdef IXGBE_FCOE |
4047 | /** | |
4048 | * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE) | |
4049 | * @adapter: board private structure to initialize | |
4050 | * | |
4051 | * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. | |
4052 | * The ring feature mask is not used as a mask for FCoE, as it can take any 8 | |
4053 | * rx queues out of the max number of rx queues, instead, it is used as the | |
4054 | * index of the first rx queue used by FCoE. | |
4055 | * | |
4056 | **/ | |
4057 | static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | |
4058 | { | |
4059 | bool ret = false; | |
4060 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | |
4061 | ||
4062 | f->indices = min((int)num_online_cpus(), f->indices); | |
4063 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | |
8de8b2e6 YZ |
4064 | adapter->num_rx_queues = 1; |
4065 | adapter->num_tx_queues = 1; | |
0331a832 YZ |
4066 | #ifdef CONFIG_IXGBE_DCB |
4067 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | |
396e799c | 4068 | e_info(probe, "FCoE enabled with DCB\n"); |
0331a832 YZ |
4069 | ixgbe_set_dcb_queues(adapter); |
4070 | } | |
4071 | #endif | |
4072 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | |
396e799c | 4073 | e_info(probe, "FCoE enabled with RSS\n"); |
8faa2a78 YZ |
4074 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
4075 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) | |
4076 | ixgbe_set_fdir_queues(adapter); | |
4077 | else | |
4078 | ixgbe_set_rss_queues(adapter); | |
0331a832 YZ |
4079 | } |
4080 | /* adding FCoE rx rings to the end */ | |
4081 | f->mask = adapter->num_rx_queues; | |
4082 | adapter->num_rx_queues += f->indices; | |
8de8b2e6 | 4083 | adapter->num_tx_queues += f->indices; |
0331a832 YZ |
4084 | |
4085 | ret = true; | |
4086 | } | |
4087 | ||
4088 | return ret; | |
4089 | } | |
4090 | ||
4091 | #endif /* IXGBE_FCOE */ | |
1cdd1ec8 GR |
4092 | /** |
4093 | * ixgbe_set_sriov_queues: Allocate queues for IOV use | |
4094 | * @adapter: board private structure to initialize | |
4095 | * | |
4096 | * IOV doesn't actually use anything, so just NAK the | |
4097 | * request for now and let the other queue routines | |
4098 | * figure out what to do. | |
4099 | */ | |
4100 | static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | |
4101 | { | |
4102 | return false; | |
4103 | } | |
4104 | ||
4df10466 JB |
4105 | /* |
4106 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent | |
4107 | * @adapter: board private structure to initialize | |
4108 | * | |
4109 | * This is the top level queue allocation routine. The order here is very | |
4110 | * important, starting with the largest set of features turned on at once, | |
4111 | * and ending with the smallest set of features. This way large combinations | |
4112 | * can be allocated if they're turned on, and smaller combinations are the | |
4113 | * fallthrough conditions. | |
4114 | * | |
4115 | **/ | |
847f53ff | 4116 | static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) |
bc97114d | 4117 | { |
1cdd1ec8 GR |
4118 | /* Start with base case */ |
4119 | adapter->num_rx_queues = 1; | |
4120 | adapter->num_tx_queues = 1; | |
4121 | adapter->num_rx_pools = adapter->num_rx_queues; | |
4122 | adapter->num_rx_queues_per_pool = 1; | |
4123 | ||
4124 | if (ixgbe_set_sriov_queues(adapter)) | |
847f53ff | 4125 | goto done; |
1cdd1ec8 | 4126 | |
0331a832 YZ |
4127 | #ifdef IXGBE_FCOE |
4128 | if (ixgbe_set_fcoe_queues(adapter)) | |
4129 | goto done; | |
4130 | ||
4131 | #endif /* IXGBE_FCOE */ | |
bc97114d PWJ |
4132 | #ifdef CONFIG_IXGBE_DCB |
4133 | if (ixgbe_set_dcb_queues(adapter)) | |
af22ab1b | 4134 | goto done; |
bc97114d PWJ |
4135 | |
4136 | #endif | |
c4cf55e5 PWJ |
4137 | if (ixgbe_set_fdir_queues(adapter)) |
4138 | goto done; | |
4139 | ||
bc97114d | 4140 | if (ixgbe_set_rss_queues(adapter)) |
af22ab1b WF |
4141 | goto done; |
4142 | ||
4143 | /* fallback to base case */ | |
4144 | adapter->num_rx_queues = 1; | |
4145 | adapter->num_tx_queues = 1; | |
4146 | ||
4147 | done: | |
847f53ff | 4148 | /* Notify the stack of the (possibly) reduced queue counts. */ |
f0796d5c | 4149 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); |
847f53ff BH |
4150 | return netif_set_real_num_rx_queues(adapter->netdev, |
4151 | adapter->num_rx_queues); | |
b9804972 JB |
4152 | } |
4153 | ||
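/**
 * ixgbe_acquire_msix_vectors - try to reserve MSI-X vectors
 * @adapter: board private structure
 * @vectors: number of vectors requested
 *
 * Retries pci_enable_msix() with progressively fewer vectors and falls back
 * to MSI or legacy interrupts if fewer than the minimum are available.
 **/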
021230d4 | 4154 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, |
e8e9f696 | 4155 | int vectors) |
021230d4 AV |
4156 | { |
4157 | int err, vector_threshold; | |
4158 | ||
4159 | /* We'll want at least 3 (vector_threshold): | |
4160 | * 1) TxQ[0] Cleanup | |
4161 | * 2) RxQ[0] Cleanup | |
4162 | * 3) Other (Link Status Change, etc.) | |
4163 | * 4) TCP Timer (optional) | |
4164 | */ | |
4165 | vector_threshold = MIN_MSIX_COUNT; | |
4166 | ||
4167 | /* The more we get, the more we will assign to Tx/Rx Cleanup | |
4168 | * for the separate queues...where Rx Cleanup >= Tx Cleanup. | |
4169 | * Right now, we simply care about how many we'll get; we'll | |
4170 | * set them up later while requesting irq's. | |
4171 | */ | |
4172 | while (vectors >= vector_threshold) { | |
4173 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, | |
e8e9f696 | 4174 | vectors); |
021230d4 AV |
4175 | if (!err) /* Success in acquiring all requested vectors. */ |
4176 | break; | |
4177 | else if (err < 0) | |
4178 | vectors = 0; /* Nasty failure, quit now */ | |
4179 | else /* err == number of vectors we should try again with */ | |
4180 | vectors = err; | |
4181 | } | |
4182 | ||
4183 | if (vectors < vector_threshold) { | |
4184 | /* Can't allocate enough MSI-X interrupts? Oh well. | |
4185 | * This just means we'll go with either a single MSI | |
4186 | * vector or fall back to legacy interrupts. | |
4187 | */ | |
849c4542 ET |
4188 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, |
4189 | "Unable to allocate MSI-X interrupts\n"); | |
021230d4 AV |
4190 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
4191 | kfree(adapter->msix_entries); | |
4192 | adapter->msix_entries = NULL; | |
021230d4 AV |
4193 | } else { |
4194 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | |
eb7f139c PWJ |
4195 | /* |
4196 | * Adjust for only the vectors we'll use, which is minimum | |
4197 | * of max_msix_q_vectors + NON_Q_VECTORS, or the number of | |
4198 | * vectors we were allocated. | |
4199 | */ | |
4200 | adapter->num_msix_vectors = min(vectors, | |
e8e9f696 | 4201 | adapter->max_msix_q_vectors + NON_Q_VECTORS); |
021230d4 AV |
4202 | } |
4203 | } | |
4204 | ||
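/*
 * A brief sketch of the retry logic above, assuming the contemporary
 * pci_enable_msix() contract: a return of 0 means every requested
 * vector was granted, a negative value is a hard failure, and a
 * positive value is the number of vectors the system could actually
 * provide.  For example, a request for 10 vectors on a host that can
 * only spare 6 returns 6, and the loop retries with exactly 6 until it
 * succeeds or drops below MIN_MSIX_COUNT and gives up on MSI-X.
 */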
021230d4 | 4205 | /** |
bc97114d | 4206 | * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS |
021230d4 AV |
4207 | * @adapter: board private structure to initialize |
4208 | * | |
bc97114d PWJ |
4209 | * Cache the descriptor ring offsets for RSS to the assigned rings. |
4210 | * | |
021230d4 | 4211 | **/ |
bc97114d | 4212 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) |
021230d4 | 4213 | { |
bc97114d PWJ |
4214 | int i; |
4215 | bool ret = false; | |
4216 | ||
4217 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | |
4218 | for (i = 0; i < adapter->num_rx_queues; i++) | |
4a0b9ca0 | 4219 | adapter->rx_ring[i]->reg_idx = i; |
bc97114d | 4220 | for (i = 0; i < adapter->num_tx_queues; i++) |
4a0b9ca0 | 4221 | adapter->tx_ring[i]->reg_idx = i; |
bc97114d PWJ |
4222 | ret = true; |
4223 | } else { | |
4224 | ret = false; | |
4225 | } | |
4226 | ||
4227 | return ret; | |
4228 | } | |
4229 | ||
4230 | #ifdef CONFIG_IXGBE_DCB | |
4231 | /** | |
4232 | * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB | |
4233 | * @adapter: board private structure to initialize | |
4234 | * | |
4235 | * Cache the descriptor ring offsets for DCB to the assigned rings. | |
4236 | * | |
4237 | **/ | |
4238 | static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |
4239 | { | |
4240 | int i; | |
4241 | bool ret = false; | |
4242 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | |
4243 | ||
4244 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | |
4245 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | |
2f90b865 AD |
4246 | /* the number of queues is assumed to be symmetric */ |
4247 | for (i = 0; i < dcb_i; i++) { | |
4a0b9ca0 PW |
4248 | adapter->rx_ring[i]->reg_idx = i << 3; |
4249 | adapter->tx_ring[i]->reg_idx = i << 2; | |
2f90b865 | 4250 | } |
bc97114d | 4251 | ret = true; |
e8e26350 | 4252 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
f92ef202 PW |
4253 | if (dcb_i == 8) { |
4254 | /* | |
4255 | * Tx TC0 starts at: descriptor queue 0 | |
4256 | * Tx TC1 starts at: descriptor queue 32 | |
4257 | * Tx TC2 starts at: descriptor queue 64 | |
4258 | * Tx TC3 starts at: descriptor queue 80 | |
4259 | * Tx TC4 starts at: descriptor queue 96 | |
4260 | * Tx TC5 starts at: descriptor queue 104 | |
4261 | * Tx TC6 starts at: descriptor queue 112 | |
4262 | * Tx TC7 starts at: descriptor queue 120 | |
4263 | * | |
4264 | * Rx TC0-TC7 are offset by 16 queues each | |
4265 | */ | |
4266 | for (i = 0; i < 3; i++) { | |
4a0b9ca0 PW |
4267 | adapter->tx_ring[i]->reg_idx = i << 5; |
4268 | adapter->rx_ring[i]->reg_idx = i << 4; | |
f92ef202 PW |
4269 | } |
4270 | for ( ; i < 5; i++) { | |
4a0b9ca0 | 4271 | adapter->tx_ring[i]->reg_idx = |
e8e9f696 | 4272 | ((i + 2) << 4); |
4a0b9ca0 | 4273 | adapter->rx_ring[i]->reg_idx = i << 4; |
f92ef202 PW |
4274 | } |
4275 | for ( ; i < dcb_i; i++) { | |
4a0b9ca0 | 4276 | adapter->tx_ring[i]->reg_idx = |
e8e9f696 | 4277 | ((i + 8) << 3); |
4a0b9ca0 | 4278 | adapter->rx_ring[i]->reg_idx = i << 4; |
f92ef202 PW |
4279 | } |
4280 | ||
4281 | ret = true; | |
4282 | } else if (dcb_i == 4) { | |
4283 | /* | |
4284 | * Tx TC0 starts at: descriptor queue 0 | |
4285 | * Tx TC1 starts at: descriptor queue 64 | |
4286 | * Tx TC2 starts at: descriptor queue 96 | |
4287 | * Tx TC3 starts at: descriptor queue 112 | |
4288 | * | |
4289 | * Rx TC0-TC3 are offset by 32 queues each | |
4290 | */ | |
4a0b9ca0 PW |
4291 | adapter->tx_ring[0]->reg_idx = 0; |
4292 | adapter->tx_ring[1]->reg_idx = 64; | |
4293 | adapter->tx_ring[2]->reg_idx = 96; | |
4294 | adapter->tx_ring[3]->reg_idx = 112; | |
f92ef202 | 4295 | for (i = 0 ; i < dcb_i; i++) |
4a0b9ca0 | 4296 | adapter->rx_ring[i]->reg_idx = i << 5; |
f92ef202 PW |
4297 | |
4298 | ret = true; | |
4299 | } else { | |
4300 | ret = false; | |
e8e26350 | 4301 | } |
bc97114d PWJ |
4302 | } else { |
4303 | ret = false; | |
021230d4 | 4304 | } |
bc97114d PWJ |
4305 | } else { |
4306 | ret = false; | |
021230d4 | 4307 | } |
bc97114d PWJ |
4308 | |
4309 | return ret; | |
4310 | } | |
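/*
 * Worked example of the register mapping above: on 82598 with 8 traffic
 * classes, TC i gets Rx queue i * 8 and Tx queue i * 4 (TC3 -> Rx 24,
 * Tx 12).  On 82599 in 8-TC mode the Tx bases come out as 0, 32, 64,
 * 80, 96, 104, 112 and 120 while the Rx bases advance by 16 per TC,
 * matching the tables in the comments.  A minimal sketch of the 8-TC Tx
 * calculation (ixgbe_dcb_tx_base() is a hypothetical helper, not part
 * of the driver itself):
 */
static inline int ixgbe_dcb_tx_base(int tc)
{
	if (tc < 3)
		return tc << 5;		/* TC0-TC2: 0, 32, 64 */
	if (tc < 5)
		return (tc + 2) << 4;	/* TC3-TC4: 80, 96 */
	return (tc + 8) << 3;		/* TC5-TC7: 104, 112, 120 */
}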
4311 | #endif | |
4312 | ||
c4cf55e5 PWJ |
4313 | /** |
4314 | * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director | |
4315 | * @adapter: board private structure to initialize | |
4316 | * | |
4317 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. | |
4318 | * | |
4319 | **/ | |
e8e9f696 | 4320 | static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) |
c4cf55e5 PWJ |
4321 | { |
4322 | int i; | |
4323 | bool ret = false; | |
4324 | ||
4325 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && | |
4326 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | |
4327 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { | |
4328 | for (i = 0; i < adapter->num_rx_queues; i++) | |
4a0b9ca0 | 4329 | adapter->rx_ring[i]->reg_idx = i; |
c4cf55e5 | 4330 | for (i = 0; i < adapter->num_tx_queues; i++) |
4a0b9ca0 | 4331 | adapter->tx_ring[i]->reg_idx = i; |
c4cf55e5 PWJ |
4332 | ret = true; |
4333 | } | |
4334 | ||
4335 | return ret; | |
4336 | } | |
4337 | ||
0331a832 YZ |
4338 | #ifdef IXGBE_FCOE |
4339 | /** | |
4340 | * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE | |
4341 | * @adapter: board private structure to initialize | |
4342 | * | |
4343 | * Cache the descriptor ring offsets for FCoE mode to the assigned rings. | |
4344 | * | |
4345 | */ | |
4346 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |
4347 | { | |
8de8b2e6 | 4348 | int i, fcoe_rx_i = 0, fcoe_tx_i = 0; |
0331a832 YZ |
4349 | bool ret = false; |
4350 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | |
4351 | ||
4352 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | |
4353 | #ifdef CONFIG_IXGBE_DCB | |
4354 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | |
8de8b2e6 YZ |
4355 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; |
4356 | ||
0331a832 | 4357 | ixgbe_cache_ring_dcb(adapter); |
8de8b2e6 | 4358 | /* find out queues in TC for FCoE */ |
4a0b9ca0 PW |
4359 | fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; |
4360 | fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; | |
8de8b2e6 YZ |
4361 | /* |
4362 | * In 82599, the number of Tx queues for each traffic | |
4363 | * class for both 8-TC and 4-TC modes is:
4364 | * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 | |
4365 | * 8 TCs: 32 32 16 16 8 8 8 8 | |
4366 | * 4 TCs: 64 64 32 32 | |
4367 | * We have max 8 queues for FCoE, where 8 is the
4368 | * FCoE redirection table size. If TC for FCoE is | |
4369 | * less than or equal to TC3, we have enough queues | |
4370 | * to add max of 8 queues for FCoE, so we start FCoE | |
4371 | * tx descriptor from the next one, i.e., reg_idx + 1. | |
4372 | * If TC for FCoE is above TC3, implying 8 TC mode, | |
4373 | * and we need 8 for FCoE, we have to take all queues | |
4374 | * in that traffic class for FCoE. | |
4375 | */ | |
4376 | if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) | |
4377 | fcoe_tx_i--; | |
0331a832 YZ |
4378 | } |
4379 | #endif /* CONFIG_IXGBE_DCB */ | |
4380 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | |
8faa2a78 YZ |
4381 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
4382 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) | |
4383 | ixgbe_cache_ring_fdir(adapter); | |
4384 | else | |
4385 | ixgbe_cache_ring_rss(adapter); | |
4386 | ||
8de8b2e6 YZ |
4387 | fcoe_rx_i = f->mask; |
4388 | fcoe_tx_i = f->mask; | |
4389 | } | |
4390 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | |
4a0b9ca0 PW |
4391 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; |
4392 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | |
0331a832 | 4393 | } |
0331a832 YZ |
4394 | ret = true; |
4395 | } | |
4396 | return ret; | |
4397 | } | |
4398 | ||
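/*
 * Worked example for the offsets above: in 8-TC DCB mode with FCoE on
 * TC1, the FCoE rings start one register past TC1's bases, i.e.
 * fcoe_rx_i = 17 and fcoe_tx_i = 33, and TC1's 32 Tx queues comfortably
 * hold the up-to-8 FCoE queues.  If FCoE sits above TC3 (say TC5, which
 * has only 8 Tx queues) and all IXGBE_FCRETA_SIZE queues are requested,
 * fcoe_tx_i is stepped back so the whole traffic class is taken over.
 */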
4399 | #endif /* IXGBE_FCOE */ | |
1cdd1ec8 GR |
4400 | /** |
4401 | * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov | |
4402 | * @adapter: board private structure to initialize | |
4403 | * | |
4404 | * SR-IOV doesn't use any descriptor rings but changes the default if | |
4405 | * no other mapping is used. | |
4406 | * | |
4407 | */ | |
4408 | static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) | |
4409 | { | |
4a0b9ca0 PW |
4410 | adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; |
4411 | adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; | |
1cdd1ec8 GR |
4412 | if (adapter->num_vfs) |
4413 | return true; | |
4414 | else | |
4415 | return false; | |
4416 | } | |
4417 | ||
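/*
 * Note on the arithmetic above: the PF's ring 0 is placed right after
 * the VF queue space, with the multiplication by 2 implying two queues
 * per VF in this mapping; with 16 VFs enabled, for example, the PF's
 * first ring would use register offset 32.
 */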
bc97114d PWJ |
4418 | /** |
4419 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | |
4420 | * @adapter: board private structure to initialize | |
4421 | * | |
4422 | * Once we know the feature-set enabled for the device, we'll cache | |
4423 | * the register offset the descriptor ring is assigned to. | |
4424 | * | |
4425 | * Note, the order of the various feature calls is important. It must start
4426 | * with the most features enabled at the same time, then trickle down to the
4427 | * fewest features turned on at once.
4428 | **/ | |
4429 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |
4430 | { | |
4431 | /* start with default case */ | |
4a0b9ca0 PW |
4432 | adapter->rx_ring[0]->reg_idx = 0; |
4433 | adapter->tx_ring[0]->reg_idx = 0; | |
bc97114d | 4434 | |
1cdd1ec8 GR |
4435 | if (ixgbe_cache_ring_sriov(adapter)) |
4436 | return; | |
4437 | ||
0331a832 YZ |
4438 | #ifdef IXGBE_FCOE |
4439 | if (ixgbe_cache_ring_fcoe(adapter)) | |
4440 | return; | |
4441 | ||
4442 | #endif /* IXGBE_FCOE */ | |
bc97114d PWJ |
4443 | #ifdef CONFIG_IXGBE_DCB |
4444 | if (ixgbe_cache_ring_dcb(adapter)) | |
4445 | return; | |
4446 | ||
4447 | #endif | |
c4cf55e5 PWJ |
4448 | if (ixgbe_cache_ring_fdir(adapter)) |
4449 | return; | |
4450 | ||
bc97114d PWJ |
4451 | if (ixgbe_cache_ring_rss(adapter)) |
4452 | return; | |
021230d4 AV |
4453 | } |
4454 | ||
9a799d71 AK |
4455 | /** |
4456 | * ixgbe_alloc_queues - Allocate memory for all rings | |
4457 | * @adapter: board private structure to initialize | |
4458 | * | |
4459 | * We allocate one ring per queue at run-time since we don't know the | |
4df10466 JB |
4460 | * number of queues at compile-time. The polling_netdev array is |
4461 | * intended for Multiqueue, but should work fine with a single queue. | |
9a799d71 | 4462 | **/ |
2f90b865 | 4463 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) |
9a799d71 AK |
4464 | { |
4465 | int i; | |
4a0b9ca0 | 4466 | int orig_node = adapter->node; |
9a799d71 | 4467 | |
021230d4 | 4468 | for (i = 0; i < adapter->num_tx_queues; i++) { |
4a0b9ca0 PW |
4469 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
4470 | if (orig_node == -1) { | |
4471 | int cur_node = next_online_node(adapter->node); | |
4472 | if (cur_node == MAX_NUMNODES) | |
4473 | cur_node = first_online_node; | |
4474 | adapter->node = cur_node; | |
4475 | } | |
4476 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | |
e8e9f696 | 4477 | adapter->node); |
4a0b9ca0 PW |
4478 | if (!ring) |
4479 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | |
4480 | if (!ring) | |
4481 | goto err_tx_ring_allocation; | |
4482 | ring->count = adapter->tx_ring_count; | |
4483 | ring->queue_index = i; | |
4484 | ring->numa_node = adapter->node; | |
4485 | ||
4486 | adapter->tx_ring[i] = ring; | |
021230d4 | 4487 | } |
b9804972 | 4488 | |
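	/*
	 * Note: when no NUMA node was assigned (orig_node == -1) the loop
	 * above walks the online nodes round-robin, wrapping back to
	 * first_online_node after the last one, so on a two-node system
	 * successive rings land on nodes 0, 1, 0, 1, and so on.
	 */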
4a0b9ca0 PW |
4489 | /* Restore the adapter's original node */ |
4490 | adapter->node = orig_node; | |
4491 | ||
9a799d71 | 4492 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4a0b9ca0 PW |
4493 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
4494 | if (orig_node == -1) { | |
4495 | int cur_node = next_online_node(adapter->node); | |
4496 | if (cur_node == MAX_NUMNODES) | |
4497 | cur_node = first_online_node; | |
4498 | adapter->node = cur_node; | |
4499 | } | |
4500 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | |
e8e9f696 | 4501 | adapter->node); |
4a0b9ca0 PW |
4502 | if (!ring) |
4503 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | |
4504 | if (!ring) | |
4505 | goto err_rx_ring_allocation; | |
4506 | ring->count = adapter->rx_ring_count; | |
4507 | ring->queue_index = i; | |
4508 | ring->numa_node = adapter->node; | |
4509 | ||
4510 | adapter->rx_ring[i] = ring; | |
021230d4 AV |
4511 | } |
4512 | ||
4a0b9ca0 PW |
4513 | /* Restore the adapter's original node */ |
4514 | adapter->node = orig_node; | |
4515 | ||
021230d4 AV |
4516 | ixgbe_cache_ring_register(adapter); |
4517 | ||
4518 | return 0; | |
4519 | ||
4520 | err_rx_ring_allocation: | |
4a0b9ca0 PW |
4521 | for (i = 0; i < adapter->num_tx_queues; i++) |
4522 | kfree(adapter->tx_ring[i]); | |
021230d4 AV |
4523 | err_tx_ring_allocation: |
4524 | return -ENOMEM; | |
4525 | } | |
4526 | ||
4527 | /** | |
4528 | * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported | |
4529 | * @adapter: board private structure to initialize | |
4530 | * | |
4531 | * Attempt to configure the interrupts using the best available | |
4532 | * capabilities of the hardware and the kernel. | |
4533 | **/ | |
feea6a57 | 4534 | static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) |
021230d4 | 4535 | { |
8be0e467 | 4536 | struct ixgbe_hw *hw = &adapter->hw; |
021230d4 AV |
4537 | int err = 0; |
4538 | int vector, v_budget; | |
4539 | ||
4540 | /* | |
4541 | * It's easy to be greedy for MSI-X vectors, but it really | |
4542 | * doesn't do us much good if we have a lot more vectors | |
4543 | * than CPUs. So let's be conservative and only ask for
342bde1b | 4544 | * (roughly) the same number of vectors as there are CPUs.
021230d4 AV |
4545 | */ |
4546 | v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, | |
e8e9f696 | 4547 | (int)num_online_cpus()) + NON_Q_VECTORS; |
021230d4 AV |
4548 | |
4549 | /* | |
4550 | * At the same time, hardware can only support a maximum of | |
8be0e467 PW |
4551 | * hw.mac->max_msix_vectors vectors. With features |
4552 | * such as RSS and VMDq, we can easily surpass the number of Rx and Tx | |
4553 | * descriptor queues supported by our device. Thus, we cap it off in | |
4554 | * those rare cases where the cpu count also exceeds our vector limit. | |
021230d4 | 4555 | */ |
8be0e467 | 4556 | v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); |
021230d4 AV |
4557 | |
4558 | /* A failure in MSI-X entry allocation isn't fatal, but it does | |
4559 | * mean we disable MSI-X capabilities of the adapter. */ | |
4560 | adapter->msix_entries = kcalloc(v_budget, | |
e8e9f696 | 4561 | sizeof(struct msix_entry), GFP_KERNEL); |
7a921c93 AD |
4562 | if (adapter->msix_entries) { |
4563 | for (vector = 0; vector < v_budget; vector++) | |
4564 | adapter->msix_entries[vector].entry = vector; | |
021230d4 | 4565 | |
7a921c93 | 4566 | ixgbe_acquire_msix_vectors(adapter, v_budget); |
021230d4 | 4567 | |
7a921c93 AD |
4568 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
4569 | goto out; | |
4570 | } | |
26d27844 | 4571 | |
7a921c93 AD |
4572 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
4573 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | |
c4cf55e5 PWJ |
4574 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
4575 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | |
4576 | adapter->atr_sample_rate = 0; | |
1cdd1ec8 GR |
4577 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
4578 | ixgbe_disable_sriov(adapter); | |
4579 | ||
847f53ff BH |
4580 | err = ixgbe_set_num_queues(adapter); |
4581 | if (err) | |
4582 | return err; | |
021230d4 | 4583 | |
021230d4 AV |
4584 | err = pci_enable_msi(adapter->pdev); |
4585 | if (!err) { | |
4586 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | |
4587 | } else { | |
849c4542 ET |
4588 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, |
4589 | "Unable to allocate MSI interrupt, " | |
4590 | "falling back to legacy. Error: %d\n", err); | |
021230d4 AV |
4591 | /* reset err */ |
4592 | err = 0; | |
4593 | } | |
4594 | ||
4595 | out: | |
021230d4 AV |
4596 | return err; |
4597 | } | |
4598 | ||
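/*
 * Worked example for the budget above: with 8 Rx and 8 Tx queues on a
 * 16-CPU machine, v_budget = min(8 + 8, 16) + NON_Q_VECTORS, which is
 * then clamped to hw->mac.max_msix_vectors before the MSI-X entries are
 * allocated.  If the MSI-X request still fails, the feature flags are
 * cleared and the driver falls back to a single queue pair serviced by
 * MSI or, failing that, a legacy interrupt.
 */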
7a921c93 AD |
4599 | /** |
4600 | * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors | |
4601 | * @adapter: board private structure to initialize | |
4602 | * | |
4603 | * We allocate one q_vector per queue interrupt. If allocation fails we | |
4604 | * return -ENOMEM. | |
4605 | **/ | |
4606 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |
4607 | { | |
4608 | int q_idx, num_q_vectors; | |
4609 | struct ixgbe_q_vector *q_vector; | |
4610 | int napi_vectors; | |
4611 | int (*poll)(struct napi_struct *, int); | |
4612 | ||
4613 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | |
4614 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | |
4615 | napi_vectors = adapter->num_rx_queues; | |
91281fd3 | 4616 | poll = &ixgbe_clean_rxtx_many; |
7a921c93 AD |
4617 | } else { |
4618 | num_q_vectors = 1; | |
4619 | napi_vectors = 1; | |
4620 | poll = &ixgbe_poll; | |
4621 | } | |
4622 | ||
4623 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | |
1a6c14a2 | 4624 | q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), |
e8e9f696 | 4625 | GFP_KERNEL, adapter->node); |
1a6c14a2 JB |
4626 | if (!q_vector) |
4627 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), | |
e8e9f696 | 4628 | GFP_KERNEL); |
7a921c93 AD |
4629 | if (!q_vector) |
4630 | goto err_out; | |
4631 | q_vector->adapter = adapter; | |
f7554a2b NS |
4632 | if (q_vector->txr_count && !q_vector->rxr_count) |
4633 | q_vector->eitr = adapter->tx_eitr_param; | |
4634 | else | |
4635 | q_vector->eitr = adapter->rx_eitr_param; | |
fe49f04a | 4636 | q_vector->v_idx = q_idx; |
91281fd3 | 4637 | netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); |
7a921c93 AD |
4638 | adapter->q_vector[q_idx] = q_vector; |
4639 | } | |
4640 | ||
4641 | return 0; | |
4642 | ||
4643 | err_out: | |
4644 | while (q_idx) { | |
4645 | q_idx--; | |
4646 | q_vector = adapter->q_vector[q_idx]; | |
4647 | netif_napi_del(&q_vector->napi); | |
4648 | kfree(q_vector); | |
4649 | adapter->q_vector[q_idx] = NULL; | |
4650 | } | |
4651 | return -ENOMEM; | |
4652 | } | |
4653 | ||
4654 | /** | |
4655 | * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors | |
4656 | * @adapter: board private structure to initialize | |
4657 | * | |
4658 | * This function frees the memory allocated to the q_vectors. In addition if | |
4659 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
4660 | * to freeing the q_vector. | |
4661 | **/ | |
4662 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | |
4663 | { | |
4664 | int q_idx, num_q_vectors; | |
7a921c93 | 4665 | |
91281fd3 | 4666 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
7a921c93 | 4667 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
91281fd3 | 4668 | else |
7a921c93 | 4669 | num_q_vectors = 1; |
7a921c93 AD |
4670 | |
4671 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | |
4672 | struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; | |
7a921c93 | 4673 | adapter->q_vector[q_idx] = NULL; |
91281fd3 | 4674 | netif_napi_del(&q_vector->napi); |
7a921c93 AD |
4675 | kfree(q_vector); |
4676 | } | |
4677 | } | |
4678 | ||
7b25cdba | 4679 | static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) |
021230d4 AV |
4680 | { |
4681 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | |
4682 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | |
4683 | pci_disable_msix(adapter->pdev); | |
4684 | kfree(adapter->msix_entries); | |
4685 | adapter->msix_entries = NULL; | |
4686 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | |
4687 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | |
4688 | pci_disable_msi(adapter->pdev); | |
4689 | } | |
021230d4 AV |
4690 | } |
4691 | ||
4692 | /** | |
4693 | * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme | |
4694 | * @adapter: board private structure to initialize | |
4695 | * | |
4696 | * We determine which interrupt scheme to use based on... | |
4697 | * - Kernel support (MSI, MSI-X) | |
4698 | * - which can be user-defined (via MODULE_PARAM) | |
4699 | * - Hardware queue count (num_*_queues) | |
4700 | * - defined by miscellaneous hardware support/features (RSS, etc.) | |
4701 | **/ | |
2f90b865 | 4702 | int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) |
021230d4 AV |
4703 | { |
4704 | int err; | |
4705 | ||
4706 | /* Number of supported queues */ | |
847f53ff BH |
4707 | err = ixgbe_set_num_queues(adapter); |
4708 | if (err) | |
4709 | return err; | |
021230d4 | 4710 | |
021230d4 AV |
4711 | err = ixgbe_set_interrupt_capability(adapter); |
4712 | if (err) { | |
849c4542 | 4713 | e_dev_err("Unable to setup interrupt capabilities\n"); |
021230d4 | 4714 | goto err_set_interrupt; |
9a799d71 AK |
4715 | } |
4716 | ||
7a921c93 AD |
4717 | err = ixgbe_alloc_q_vectors(adapter); |
4718 | if (err) { | |
849c4542 | 4719 | e_dev_err("Unable to allocate memory for queue vectors\n"); |
7a921c93 AD |
4720 | goto err_alloc_q_vectors; |
4721 | } | |
4722 | ||
4723 | err = ixgbe_alloc_queues(adapter); | |
4724 | if (err) { | |
849c4542 | 4725 | e_dev_err("Unable to allocate memory for queues\n"); |
7a921c93 AD |
4726 | goto err_alloc_queues; |
4727 | } | |
4728 | ||
849c4542 | 4729 | e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", |
396e799c ET |
4730 | (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", |
4731 | adapter->num_rx_queues, adapter->num_tx_queues); | |
021230d4 AV |
4732 | |
4733 | set_bit(__IXGBE_DOWN, &adapter->state); | |
4734 | ||
9a799d71 | 4735 | return 0; |
021230d4 | 4736 | |
7a921c93 AD |
4737 | err_alloc_queues: |
4738 | ixgbe_free_q_vectors(adapter); | |
4739 | err_alloc_q_vectors: | |
4740 | ixgbe_reset_interrupt_capability(adapter); | |
021230d4 | 4741 | err_set_interrupt: |
7a921c93 AD |
4742 | return err; |
4743 | } | |
4744 | ||
4745 | /** | |
4746 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | |
4747 | * @adapter: board private structure to clear interrupt scheme on | |
4748 | * | |
4749 | * We go through and clear interrupt specific resources and reset the structure | |
4750 | * to pre-load conditions | |
4751 | **/ | |
4752 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | |
4753 | { | |
4a0b9ca0 PW |
4754 | int i; |
4755 | ||
4756 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
4757 | kfree(adapter->tx_ring[i]); | |
4758 | adapter->tx_ring[i] = NULL; | |
4759 | } | |
4760 | for (i = 0; i < adapter->num_rx_queues; i++) { | |
4761 | kfree(adapter->rx_ring[i]); | |
4762 | adapter->rx_ring[i] = NULL; | |
4763 | } | |
7a921c93 AD |
4764 | |
4765 | ixgbe_free_q_vectors(adapter); | |
4766 | ixgbe_reset_interrupt_capability(adapter); | |
9a799d71 AK |
4767 | } |
4768 | ||
c4900be0 DS |
4769 | /** |
4770 | * ixgbe_sfp_timer - timer that schedules the search for a missing module
4771 | * @data: pointer to our adapter struct | |
4772 | **/ | |
4773 | static void ixgbe_sfp_timer(unsigned long data) | |
4774 | { | |
4775 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | |
4776 | ||
4df10466 JB |
4777 | /* |
4778 | * Do the sfp_timer outside of interrupt context due to the | |
c4900be0 DS |
4779 | * delays that sfp+ detection requires |
4780 | */ | |
4781 | schedule_work(&adapter->sfp_task); | |
4782 | } | |
4783 | ||
4784 | /** | |
4785 | * ixgbe_sfp_task - worker thread to find a missing module | |
4786 | * @work: pointer to work_struct containing our data | |
4787 | **/ | |
4788 | static void ixgbe_sfp_task(struct work_struct *work) | |
4789 | { | |
4790 | struct ixgbe_adapter *adapter = container_of(work, | |
e8e9f696 JP |
4791 | struct ixgbe_adapter, |
4792 | sfp_task); | |
c4900be0 DS |
4793 | struct ixgbe_hw *hw = &adapter->hw; |
4794 | ||
4795 | if ((hw->phy.type == ixgbe_phy_nl) && | |
4796 | (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { | |
4797 | s32 ret = hw->phy.ops.identify_sfp(hw); | |
63d6e1d8 | 4798 | if (ret == IXGBE_ERR_SFP_NOT_PRESENT) |
c4900be0 DS |
4799 | goto reschedule; |
4800 | ret = hw->phy.ops.reset(hw); | |
4801 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { | |
849c4542 ET |
4802 | e_dev_err("failed to initialize because an unsupported " |
4803 | "SFP+ module type was detected.\n"); | |
4804 | e_dev_err("Reload the driver after installing a " | |
4805 | "supported module.\n"); | |
c4900be0 DS |
4806 | unregister_netdev(adapter->netdev); |
4807 | } else { | |
396e799c | 4808 | e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); |
c4900be0 DS |
4809 | } |
4810 | /* don't need this routine any more */ | |
4811 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | |
4812 | } | |
4813 | return; | |
4814 | reschedule: | |
4815 | if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) | |
4816 | mod_timer(&adapter->sfp_timer, | |
e8e9f696 | 4817 | round_jiffies(jiffies + (2 * HZ))); |
c4900be0 DS |
4818 | } |
4819 | ||
9a799d71 AK |
4820 | /** |
4821 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) | |
4822 | * @adapter: board private structure to initialize | |
4823 | * | |
4824 | * ixgbe_sw_init initializes the Adapter private data structure. | |
4825 | * Fields are initialized based on PCI device information and | |
4826 | * OS network device settings (MTU size). | |
4827 | **/ | |
4828 | static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |
4829 | { | |
4830 | struct ixgbe_hw *hw = &adapter->hw; | |
4831 | struct pci_dev *pdev = adapter->pdev; | |
9a713e7c | 4832 | struct net_device *dev = adapter->netdev; |
021230d4 | 4833 | unsigned int rss; |
7a6b6f51 | 4834 | #ifdef CONFIG_IXGBE_DCB |
2f90b865 AD |
4835 | int j; |
4836 | struct tc_configuration *tc; | |
4837 | #endif | |
021230d4 | 4838 | |
c44ade9e JB |
4839 | /* PCI config space info */ |
4840 | ||
4841 | hw->vendor_id = pdev->vendor; | |
4842 | hw->device_id = pdev->device; | |
4843 | hw->revision_id = pdev->revision; | |
4844 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | |
4845 | hw->subsystem_device_id = pdev->subsystem_device; | |
4846 | ||
021230d4 AV |
4847 | /* Set capability flags */ |
4848 | rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); | |
4849 | adapter->ring_feature[RING_F_RSS].indices = rss; | |
4850 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | |
2f90b865 | 4851 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; |
bf069c97 DS |
4852 | if (hw->mac.type == ixgbe_mac_82598EB) { |
4853 | if (hw->device_id == IXGBE_DEV_ID_82598AT) | |
4854 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; | |
e8e26350 | 4855 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; |
bf069c97 | 4856 | } else if (hw->mac.type == ixgbe_mac_82599EB) { |
e8e26350 | 4857 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
0c19d6af PWJ |
4858 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; |
4859 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; | |
119fc60a MC |
4860 | if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) |
4861 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; | |
9a713e7c PW |
4862 | if (dev->features & NETIF_F_NTUPLE) { |
4863 | /* Flow Director perfect filter enabled */ | |
4864 | adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | |
4865 | adapter->atr_sample_rate = 0; | |
4866 | spin_lock_init(&adapter->fdir_perfect_lock); | |
4867 | } else { | |
4868 | /* Flow Director hash filters enabled */ | |
4869 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | |
4870 | adapter->atr_sample_rate = 20; | |
4871 | } | |
c4cf55e5 | 4872 | adapter->ring_feature[RING_F_FDIR].indices = |
e8e9f696 | 4873 | IXGBE_MAX_FDIR_INDICES; |
c4cf55e5 | 4874 | adapter->fdir_pballoc = 0; |
eacd73f7 | 4875 | #ifdef IXGBE_FCOE |
0d551589 YZ |
4876 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; |
4877 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; | |
4878 | adapter->ring_feature[RING_F_FCOE].indices = 0; | |
61a0f421 | 4879 | #ifdef CONFIG_IXGBE_DCB |
6ee16520 YZ |
4880 | /* Default traffic class to use for FCoE */ |
4881 | adapter->fcoe.tc = IXGBE_FCOE_DEFTC; | |
56075a98 | 4882 | adapter->fcoe.up = IXGBE_FCOE_DEFTC; |
61a0f421 | 4883 | #endif |
eacd73f7 | 4884 | #endif /* IXGBE_FCOE */ |
f8212f97 | 4885 | } |
2f90b865 | 4886 | |
7a6b6f51 | 4887 | #ifdef CONFIG_IXGBE_DCB |
2f90b865 AD |
4888 | /* Configure DCB traffic classes */ |
4889 | for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { | |
4890 | tc = &adapter->dcb_cfg.tc_config[j]; | |
4891 | tc->path[DCB_TX_CONFIG].bwg_id = 0; | |
4892 | tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); | |
4893 | tc->path[DCB_RX_CONFIG].bwg_id = 0; | |
4894 | tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); | |
4895 | tc->dcb_pfc = pfc_disabled; | |
4896 | } | |
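	/*
	 * Note: 12 + (j & 1) alternates between 12 and 13, so the eight
	 * traffic classes receive 12, 13, 12, 13, 12, 13, 12 and 13
	 * percent of their bandwidth group (4 * 12 + 4 * 13 = 100) on
	 * both the Tx and Rx paths.
	 */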
4897 | adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; | |
4898 | adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; | |
4899 | adapter->dcb_cfg.rx_pba_cfg = pba_equal; | |
264857b8 | 4900 | adapter->dcb_cfg.pfc_mode_enable = false; |
2f90b865 AD |
4901 | adapter->dcb_cfg.round_robin_enable = false; |
4902 | adapter->dcb_set_bitmap = 0x00; | |
4903 | ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, | |
e8e9f696 | 4904 | adapter->ring_feature[RING_F_DCB].indices); |
2f90b865 AD |
4905 | |
4906 | #endif | |
9a799d71 AK |
4907 | |
4908 | /* default flow control settings */ | |
cd7664f6 | 4909 | hw->fc.requested_mode = ixgbe_fc_full; |
71fd570b | 4910 | hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ |
264857b8 PWJ |
4911 | #ifdef CONFIG_DCB |
4912 | adapter->last_lfc_mode = hw->fc.current_mode; | |
4913 | #endif | |
2b9ade93 JB |
4914 | hw->fc.high_water = IXGBE_DEFAULT_FCRTH; |
4915 | hw->fc.low_water = IXGBE_DEFAULT_FCRTL; | |
4916 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; | |
4917 | hw->fc.send_xon = true; | |
71fd570b | 4918 | hw->fc.disable_fc_autoneg = false; |
9a799d71 | 4919 | |
30efa5a3 | 4920 | /* enable itr by default in dynamic mode */ |
f7554a2b NS |
4921 | adapter->rx_itr_setting = 1; |
4922 | adapter->rx_eitr_param = 20000; | |
4923 | adapter->tx_itr_setting = 1; | |
4924 | adapter->tx_eitr_param = 10000; | |
30efa5a3 JB |
4925 | |
4926 | /* set defaults for eitr in MegaBytes */ | |
4927 | adapter->eitr_low = 10; | |
4928 | adapter->eitr_high = 20; | |
4929 | ||
4930 | /* set default ring sizes */ | |
4931 | adapter->tx_ring_count = IXGBE_DEFAULT_TXD; | |
4932 | adapter->rx_ring_count = IXGBE_DEFAULT_RXD; | |
4933 | ||
9a799d71 | 4934 | /* initialize eeprom parameters */ |
c44ade9e | 4935 | if (ixgbe_init_eeprom_params_generic(hw)) { |
849c4542 | 4936 | e_dev_err("EEPROM initialization failed\n"); |
9a799d71 AK |
4937 | return -EIO; |
4938 | } | |
4939 | ||
021230d4 | 4940 | /* enable rx csum by default */ |
9a799d71 AK |
4941 | adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; |
4942 | ||
1a6c14a2 JB |
4943 | /* get assigned NUMA node */ |
4944 | adapter->node = dev_to_node(&pdev->dev); | |
4945 | ||
9a799d71 AK |
4946 | set_bit(__IXGBE_DOWN, &adapter->state); |
4947 | ||
4948 | return 0; | |
4949 | } | |
4950 | ||
4951 | /** | |
4952 | * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) | |
4953 | * @adapter: board private structure | |
3a581073 | 4954 | * @tx_ring: tx descriptor ring (for a specific queue) to setup |
9a799d71 AK |
4955 | * |
4956 | * Return 0 on success, negative on failure | |
4957 | **/ | |
4958 | int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | |
e8e9f696 | 4959 | struct ixgbe_ring *tx_ring) |
9a799d71 AK |
4960 | { |
4961 | struct pci_dev *pdev = adapter->pdev; | |
4962 | int size; | |
4963 | ||
3a581073 | 4964 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; |
4a0b9ca0 | 4965 | tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); |
1a6c14a2 JB |
4966 | if (!tx_ring->tx_buffer_info) |
4967 | tx_ring->tx_buffer_info = vmalloc(size); | |
e01c31a5 JB |
4968 | if (!tx_ring->tx_buffer_info) |
4969 | goto err; | |
3a581073 | 4970 | memset(tx_ring->tx_buffer_info, 0, size); |
9a799d71 AK |
4971 | |
4972 | /* round up to nearest 4K */ | |
12207e49 | 4973 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); |
3a581073 | 4974 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
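	/*
	 * For example: each union ixgbe_adv_tx_desc is 16 bytes, so a
	 * 512-entry ring needs 8192 bytes and is left untouched by the
	 * ALIGN() above, while a 1000-entry ring (16000 bytes) would be
	 * rounded up to 16384.
	 */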
9a799d71 | 4975 | |
1b507730 NN |
4976 | tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, |
4977 | &tx_ring->dma, GFP_KERNEL); | |
e01c31a5 JB |
4978 | if (!tx_ring->desc) |
4979 | goto err; | |
9a799d71 | 4980 | |
3a581073 JB |
4981 | tx_ring->next_to_use = 0; |
4982 | tx_ring->next_to_clean = 0; | |
4983 | tx_ring->work_limit = tx_ring->count; | |
9a799d71 | 4984 | return 0; |
e01c31a5 JB |
4985 | |
4986 | err: | |
4987 | vfree(tx_ring->tx_buffer_info); | |
4988 | tx_ring->tx_buffer_info = NULL; | |
396e799c | 4989 | e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); |
e01c31a5 | 4990 | return -ENOMEM; |
9a799d71 AK |
4991 | } |
4992 | ||
69888674 AD |
4993 | /** |
4994 | * ixgbe_setup_all_tx_resources - allocate all queues Tx resources | |
4995 | * @adapter: board private structure | |
4996 | * | |
4997 | * If this function returns with an error, then it's possible one or | |
4998 | * more of the rings is populated (while the rest are not). It is the | |
4999 | * callers duty to clean those orphaned rings. | |
5000 | * | |
5001 | * Return 0 on success, negative on failure | |
5002 | **/ | |
5003 | static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |
5004 | { | |
5005 | int i, err = 0; | |
5006 | ||
5007 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
4a0b9ca0 | 5008 | err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); |
69888674 AD |
5009 | if (!err) |
5010 | continue; | |
396e799c | 5011 | e_err(probe, "Allocation for Tx Queue %u failed\n", i); |
69888674 AD |
5012 | break; |
5013 | } | |
5014 | ||
5015 | return err; | |
5016 | } | |
5017 | ||
9a799d71 AK |
5018 | /** |
5019 | * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) | |
5020 | * @adapter: board private structure | |
3a581073 | 5021 | * @rx_ring: rx descriptor ring (for a specific queue) to setup |
9a799d71 AK |
5022 | * |
5023 | * Returns 0 on success, negative on failure | |
5024 | **/ | |
5025 | int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |
e8e9f696 | 5026 | struct ixgbe_ring *rx_ring) |
9a799d71 AK |
5027 | { |
5028 | struct pci_dev *pdev = adapter->pdev; | |
021230d4 | 5029 | int size; |
9a799d71 | 5030 | |
3a581073 | 5031 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; |
1a6c14a2 JB |
5032 | rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); |
5033 | if (!rx_ring->rx_buffer_info) | |
5034 | rx_ring->rx_buffer_info = vmalloc(size); | |
3a581073 | 5035 | if (!rx_ring->rx_buffer_info) { |
396e799c ET |
5036 | e_err(probe, "vmalloc allocation failed for the Rx " |
5037 | "descriptor ring\n"); | |
177db6ff | 5038 | goto alloc_failed; |
9a799d71 | 5039 | } |
3a581073 | 5040 | memset(rx_ring->rx_buffer_info, 0, size); |
9a799d71 | 5041 | |
9a799d71 | 5042 | /* Round up to nearest 4K */ |
3a581073 JB |
5043 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); |
5044 | rx_ring->size = ALIGN(rx_ring->size, 4096); | |
9a799d71 | 5045 | |
1b507730 NN |
5046 | rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, |
5047 | &rx_ring->dma, GFP_KERNEL); | |
9a799d71 | 5048 | |
3a581073 | 5049 | if (!rx_ring->desc) { |
396e799c ET |
5050 | e_err(probe, "Memory allocation failed for the Rx " |
5051 | "descriptor ring\n"); | |
3a581073 | 5052 | vfree(rx_ring->rx_buffer_info); |
177db6ff | 5053 | goto alloc_failed; |
9a799d71 AK |
5054 | } |
5055 | ||
3a581073 JB |
5056 | rx_ring->next_to_clean = 0; |
5057 | rx_ring->next_to_use = 0; | |
9a799d71 AK |
5058 | |
5059 | return 0; | |
177db6ff MC |
5060 | |
5061 | alloc_failed: | |
177db6ff | 5062 | return -ENOMEM; |
9a799d71 AK |
5063 | } |
5064 | ||
69888674 AD |
5065 | /** |
5066 | * ixgbe_setup_all_rx_resources - allocate all queues Rx resources | |
5067 | * @adapter: board private structure | |
5068 | * | |
5069 | * If this function returns with an error, then it's possible one or | |
5070 | * more of the rings is populated (while the rest are not). It is the | |
5071 | * caller's duty to clean those orphaned rings.
5072 | * | |
5073 | * Return 0 on success, negative on failure | |
5074 | **/ | |
5075 | ||
5076 | static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) | |
5077 | { | |
5078 | int i, err = 0; | |
5079 | ||
5080 | for (i = 0; i < adapter->num_rx_queues; i++) { | |
4a0b9ca0 | 5081 | err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); |
69888674 AD |
5082 | if (!err) |
5083 | continue; | |
396e799c | 5084 | e_err(probe, "Allocation for Rx Queue %u failed\n", i); |
69888674 AD |
5085 | break; |
5086 | } | |
5087 | ||
5088 | return err; | |
5089 | } | |
5090 | ||
9a799d71 AK |
5091 | /** |
5092 | * ixgbe_free_tx_resources - Free Tx Resources per Queue | |
5093 | * @adapter: board private structure | |
5094 | * @tx_ring: Tx descriptor ring for a specific queue | |
5095 | * | |
5096 | * Free all transmit software resources | |
5097 | **/ | |
c431f97e | 5098 | void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, |
e8e9f696 | 5099 | struct ixgbe_ring *tx_ring) |
9a799d71 AK |
5100 | { |
5101 | struct pci_dev *pdev = adapter->pdev; | |
5102 | ||
5103 | ixgbe_clean_tx_ring(adapter, tx_ring); | |
5104 | ||
5105 | vfree(tx_ring->tx_buffer_info); | |
5106 | tx_ring->tx_buffer_info = NULL; | |
5107 | ||
1b507730 NN |
5108 | dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, |
5109 | tx_ring->dma); | |
9a799d71 AK |
5110 | |
5111 | tx_ring->desc = NULL; | |
5112 | } | |
5113 | ||
5114 | /** | |
5115 | * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues | |
5116 | * @adapter: board private structure | |
5117 | * | |
5118 | * Free all transmit software resources | |
5119 | **/ | |
5120 | static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) | |
5121 | { | |
5122 | int i; | |
5123 | ||
5124 | for (i = 0; i < adapter->num_tx_queues; i++) | |
4a0b9ca0 PW |
5125 | if (adapter->tx_ring[i]->desc) |
5126 | ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); | |
9a799d71 AK |
5127 | } |
5128 | ||
5129 | /** | |
b4617240 | 5130 | * ixgbe_free_rx_resources - Free Rx Resources |
9a799d71 AK |
5131 | * @adapter: board private structure |
5132 | * @rx_ring: ring to clean the resources from | |
5133 | * | |
5134 | * Free all receive software resources | |
5135 | **/ | |
c431f97e | 5136 | void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, |
e8e9f696 | 5137 | struct ixgbe_ring *rx_ring) |
9a799d71 AK |
5138 | { |
5139 | struct pci_dev *pdev = adapter->pdev; | |
5140 | ||
5141 | ixgbe_clean_rx_ring(adapter, rx_ring); | |
5142 | ||
5143 | vfree(rx_ring->rx_buffer_info); | |
5144 | rx_ring->rx_buffer_info = NULL; | |
5145 | ||
1b507730 NN |
5146 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, |
5147 | rx_ring->dma); | |
9a799d71 AK |
5148 | |
5149 | rx_ring->desc = NULL; | |
5150 | } | |
5151 | ||
5152 | /** | |
5153 | * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues | |
5154 | * @adapter: board private structure | |
5155 | * | |
5156 | * Free all receive software resources | |
5157 | **/ | |
5158 | static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |
5159 | { | |
5160 | int i; | |
5161 | ||
5162 | for (i = 0; i < adapter->num_rx_queues; i++) | |
4a0b9ca0 PW |
5163 | if (adapter->rx_ring[i]->desc) |
5164 | ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); | |
9a799d71 AK |
5165 | } |
5166 | ||
9a799d71 AK |
5167 | /** |
5168 | * ixgbe_change_mtu - Change the Maximum Transfer Unit | |
5169 | * @netdev: network interface device structure | |
5170 | * @new_mtu: new value for maximum frame size | |
5171 | * | |
5172 | * Returns 0 on success, negative on failure | |
5173 | **/ | |
5174 | static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | |
5175 | { | |
5176 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
5177 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | |
5178 | ||
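	/*
	 * For example, an MTU of 9000 gives max_frame = 9000 + ETH_HLEN (14)
	 * + ETH_FCS_LEN (4) = 9018 bytes, which must still fit within
	 * IXGBE_MAX_JUMBO_FRAME_SIZE for the request to be accepted.
	 */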
42c783c5 JB |
5179 | /* MTU < 68 is an error and causes problems on some kernels */ |
5180 | if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) | |
9a799d71 AK |
5181 | return -EINVAL; |
5182 | ||
396e799c | 5183 | e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); |
021230d4 | 5184 | /* must set new MTU before calling down or up */ |
9a799d71 AK |
5185 | netdev->mtu = new_mtu; |
5186 | ||
d4f80882 AV |
5187 | if (netif_running(netdev)) |
5188 | ixgbe_reinit_locked(adapter); | |
9a799d71 AK |
5189 | |
5190 | return 0; | |
5191 | } | |
5192 | ||
5193 | /** | |
5194 | * ixgbe_open - Called when a network interface is made active | |
5195 | * @netdev: network interface device structure | |
5196 | * | |
5197 | * Returns 0 on success, negative value on failure | |
5198 | * | |
5199 | * The open entry point is called when a network interface is made | |
5200 | * active by the system (IFF_UP). At this point all resources needed | |
5201 | * for transmit and receive operations are allocated, the interrupt | |
5202 | * handler is registered with the OS, the watchdog timer is started, | |
5203 | * and the stack is notified that the interface is ready. | |
5204 | **/ | |
5205 | static int ixgbe_open(struct net_device *netdev) | |
5206 | { | |
5207 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
5208 | int err; | |
4bebfaa5 AK |
5209 | |
5210 | /* disallow open during test */ | |
5211 | if (test_bit(__IXGBE_TESTING, &adapter->state)) | |
5212 | return -EBUSY; | |
9a799d71 | 5213 | |
54386467 JB |
5214 | netif_carrier_off(netdev); |
5215 | ||
9a799d71 AK |
5216 | /* allocate transmit descriptors */ |
5217 | err = ixgbe_setup_all_tx_resources(adapter); | |
5218 | if (err) | |
5219 | goto err_setup_tx; | |
5220 | ||
9a799d71 AK |
5221 | /* allocate receive descriptors */ |
5222 | err = ixgbe_setup_all_rx_resources(adapter); | |
5223 | if (err) | |
5224 | goto err_setup_rx; | |
5225 | ||
5226 | ixgbe_configure(adapter); | |
5227 | ||
021230d4 | 5228 | err = ixgbe_request_irq(adapter); |
9a799d71 AK |
5229 | if (err) |
5230 | goto err_req_irq; | |
5231 | ||
9a799d71 AK |
5232 | err = ixgbe_up_complete(adapter); |
5233 | if (err) | |
5234 | goto err_up; | |
5235 | ||
d55b53ff JK |
5236 | netif_tx_start_all_queues(netdev); |
5237 | ||
9a799d71 AK |
5238 | return 0; |
5239 | ||
5240 | err_up: | |
5eba3699 | 5241 | ixgbe_release_hw_control(adapter); |
9a799d71 AK |
5242 | ixgbe_free_irq(adapter); |
5243 | err_req_irq: | |
9a799d71 | 5244 | err_setup_rx: |
a20a1199 | 5245 | ixgbe_free_all_rx_resources(adapter); |
9a799d71 | 5246 | err_setup_tx: |
a20a1199 | 5247 | ixgbe_free_all_tx_resources(adapter); |
9a799d71 AK |
5248 | ixgbe_reset(adapter); |
5249 | ||
5250 | return err; | |
5251 | } | |
5252 | ||
5253 | /** | |
5254 | * ixgbe_close - Disables a network interface | |
5255 | * @netdev: network interface device structure | |
5256 | * | |
5257 | * Returns 0, this is not allowed to fail | |
5258 | * | |
5259 | * The close entry point is called when an interface is de-activated | |
5260 | * by the OS. The hardware is still under the driver's control, but
5261 | * needs to be disabled. A global MAC reset is issued to stop the | |
5262 | * hardware, and all transmit and receive resources are freed. | |
5263 | **/ | |
5264 | static int ixgbe_close(struct net_device *netdev) | |
5265 | { | |
5266 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
9a799d71 AK |
5267 | |
5268 | ixgbe_down(adapter); | |
5269 | ixgbe_free_irq(adapter); | |
5270 | ||
5271 | ixgbe_free_all_tx_resources(adapter); | |
5272 | ixgbe_free_all_rx_resources(adapter); | |
5273 | ||
5eba3699 | 5274 | ixgbe_release_hw_control(adapter); |
9a799d71 AK |
5275 | |
5276 | return 0; | |
5277 | } | |
5278 | ||
b3c8b4ba AD |
5279 | #ifdef CONFIG_PM |
5280 | static int ixgbe_resume(struct pci_dev *pdev) | |
5281 | { | |
5282 | struct net_device *netdev = pci_get_drvdata(pdev); | |
5283 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
5284 | u32 err; | |
5285 | ||
5286 | pci_set_power_state(pdev, PCI_D0); | |
5287 | pci_restore_state(pdev); | |
656ab817 DS |
5288 | /* |
5289 | * pci_restore_state clears dev->state_saved so call | |
5290 | * pci_save_state to restore it. | |
5291 | */ | |
5292 | pci_save_state(pdev); | |
9ce77666 | 5293 | |
5294 | err = pci_enable_device_mem(pdev); | |
b3c8b4ba | 5295 | if (err) { |
849c4542 | 5296 | e_dev_err("Cannot enable PCI device from suspend\n"); |
b3c8b4ba AD |
5297 | return err; |
5298 | } | |
5299 | pci_set_master(pdev); | |
5300 | ||
dd4d8ca6 | 5301 | pci_wake_from_d3(pdev, false); |
b3c8b4ba AD |
5302 | |
5303 | err = ixgbe_init_interrupt_scheme(adapter); | |
5304 | if (err) { | |
849c4542 | 5305 | e_dev_err("Cannot initialize interrupts for device\n"); |
b3c8b4ba AD |
5306 | return err; |
5307 | } | |
5308 | ||
b3c8b4ba AD |
5309 | ixgbe_reset(adapter); |
5310 | ||
495dce12 WJP |
5311 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
5312 | ||
b3c8b4ba AD |
5313 | if (netif_running(netdev)) { |
5314 | err = ixgbe_open(adapter->netdev); | |
5315 | if (err) | |
5316 | return err; | |
5317 | } | |
5318 | ||
5319 | netif_device_attach(netdev); | |
5320 | ||
5321 | return 0; | |
5322 | } | |
b3c8b4ba | 5323 | #endif /* CONFIG_PM */ |
9d8d05ae RW |
5324 | |
5325 | static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |
b3c8b4ba AD |
5326 | { |
5327 | struct net_device *netdev = pci_get_drvdata(pdev); | |
5328 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
e8e26350 PW |
5329 | struct ixgbe_hw *hw = &adapter->hw; |
5330 | u32 ctrl, fctrl; | |
5331 | u32 wufc = adapter->wol; | |
b3c8b4ba AD |
5332 | #ifdef CONFIG_PM |
5333 | int retval = 0; | |
5334 | #endif | |
5335 | ||
5336 | netif_device_detach(netdev); | |
5337 | ||
5338 | if (netif_running(netdev)) { | |
5339 | ixgbe_down(adapter); | |
5340 | ixgbe_free_irq(adapter); | |
5341 | ixgbe_free_all_tx_resources(adapter); | |
5342 | ixgbe_free_all_rx_resources(adapter); | |
5343 | } | |
b3c8b4ba AD |
5344 | |
5345 | #ifdef CONFIG_PM | |
5346 | retval = pci_save_state(pdev); | |
5347 | if (retval) | |
5348 | return retval; | |
4df10466 | 5349 | |
b3c8b4ba | 5350 | #endif |
e8e26350 PW |
5351 | if (wufc) { |
5352 | ixgbe_set_rx_mode(netdev); | |
b3c8b4ba | 5353 | |
e8e26350 PW |
5354 | /* turn on all-multi mode if wake on multicast is enabled */ |
5355 | if (wufc & IXGBE_WUFC_MC) { | |
5356 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | |
5357 | fctrl |= IXGBE_FCTRL_MPE; | |
5358 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | |
5359 | } | |
5360 | ||
5361 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | |
5362 | ctrl |= IXGBE_CTRL_GIO_DIS; | |
5363 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); | |
5364 | ||
5365 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); | |
5366 | } else { | |
5367 | IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); | |
5368 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); | |
5369 | } | |
5370 | ||
dd4d8ca6 DS |
5371 | if (wufc && hw->mac.type == ixgbe_mac_82599EB) |
5372 | pci_wake_from_d3(pdev, true); | |
5373 | else | |
5374 | pci_wake_from_d3(pdev, false); | |
b3c8b4ba | 5375 | |
9d8d05ae RW |
5376 | *enable_wake = !!wufc; |
5377 | ||
fa378134 AG |
5378 | ixgbe_clear_interrupt_scheme(adapter); |
5379 | ||
b3c8b4ba AD |
5380 | ixgbe_release_hw_control(adapter); |
5381 | ||
5382 | pci_disable_device(pdev); | |
5383 | ||
9d8d05ae RW |
5384 | return 0; |
5385 | } | |
5386 | ||
5387 | #ifdef CONFIG_PM | |
5388 | static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) | |
5389 | { | |
5390 | int retval; | |
5391 | bool wake; | |
5392 | ||
5393 | retval = __ixgbe_shutdown(pdev, &wake); | |
5394 | if (retval) | |
5395 | return retval; | |
5396 | ||
5397 | if (wake) { | |
5398 | pci_prepare_to_sleep(pdev); | |
5399 | } else { | |
5400 | pci_wake_from_d3(pdev, false); | |
5401 | pci_set_power_state(pdev, PCI_D3hot); | |
5402 | } | |
b3c8b4ba AD |
5403 | |
5404 | return 0; | |
5405 | } | |
9d8d05ae | 5406 | #endif /* CONFIG_PM */ |
b3c8b4ba AD |
5407 | |
5408 | static void ixgbe_shutdown(struct pci_dev *pdev) | |
5409 | { | |
9d8d05ae RW |
5410 | bool wake; |
5411 | ||
5412 | __ixgbe_shutdown(pdev, &wake); | |
5413 | ||
5414 | if (system_state == SYSTEM_POWER_OFF) { | |
5415 | pci_wake_from_d3(pdev, wake); | |
5416 | pci_set_power_state(pdev, PCI_D3hot); | |
5417 | } | |
b3c8b4ba AD |
5418 | } |
5419 | ||
9a799d71 AK |
5420 | /** |
5421 | * ixgbe_update_stats - Update the board statistics counters. | |
5422 | * @adapter: board private structure | |
5423 | **/ | |
5424 | void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |
5425 | { | |
2d86f139 | 5426 | struct net_device *netdev = adapter->netdev; |
9a799d71 | 5427 | struct ixgbe_hw *hw = &adapter->hw; |
6f11eef7 AV |
5428 | u64 total_mpc = 0; |
5429 | u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; | |
eb985f09 | 5430 | u64 non_eop_descs = 0, restart_queue = 0; |
7ca647bd | 5431 | struct ixgbe_hw_stats *hwstats = &adapter->stats; |
9a799d71 | 5432 | |
d08935c2 DS |
5433 | if (test_bit(__IXGBE_DOWN, &adapter->state) || |
5434 | test_bit(__IXGBE_RESETTING, &adapter->state)) | |
5435 | return; | |
5436 | ||
94b982b2 | 5437 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
f8212f97 | 5438 | u64 rsc_count = 0; |
94b982b2 | 5439 | u64 rsc_flush = 0; |
d51019a4 PW |
5440 | for (i = 0; i < 16; i++) |
5441 | adapter->hw_rx_no_dma_resources += | |
7ca647bd | 5442 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); |
94b982b2 | 5443 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4a0b9ca0 PW |
5444 | rsc_count += adapter->rx_ring[i]->rsc_count; |
5445 | rsc_flush += adapter->rx_ring[i]->rsc_flush; | |
94b982b2 MC |
5446 | } |
5447 | adapter->rsc_total_count = rsc_count; | |
5448 | adapter->rsc_total_flush = rsc_flush; | |
d51019a4 PW |
5449 | } |
5450 | ||
7ca3bc58 JB |
5451 | /* gather some stats to the adapter struct that are per queue */ |
5452 | for (i = 0; i < adapter->num_tx_queues; i++) | |
4a0b9ca0 | 5453 | restart_queue += adapter->tx_ring[i]->restart_queue; |
eb985f09 | 5454 | adapter->restart_queue = restart_queue; |
7ca3bc58 JB |
5455 | |
5456 | for (i = 0; i < adapter->num_rx_queues; i++) | |
4a0b9ca0 | 5457 | non_eop_descs += adapter->rx_ring[i]->non_eop_descs; |
eb985f09 | 5458 | adapter->non_eop_descs = non_eop_descs; |
7ca3bc58 | 5459 | |
7ca647bd | 5460 | hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); |
6f11eef7 AV |
5461 | for (i = 0; i < 8; i++) { |
5462 | /* for packet buffers not used, the register should read 0 */ | |
5463 | mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); | |
5464 | missed_rx += mpc; | |
7ca647bd JP |
5465 | hwstats->mpc[i] += mpc; |
5466 | total_mpc += hwstats->mpc[i]; | |
e8e26350 | 5467 | if (hw->mac.type == ixgbe_mac_82598EB) |
7ca647bd JP |
5468 | hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); |
5469 | hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); | |
5470 | hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); | |
5471 | hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); | |
5472 | hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); | |
e8e26350 | 5473 | if (hw->mac.type == ixgbe_mac_82599EB) { |
7ca647bd JP |
5474 | hwstats->pxonrxc[i] += |
5475 | IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); | |
5476 | hwstats->pxoffrxc[i] += | |
5477 | IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); | |
5478 | hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | |
e8e26350 | 5479 | } else { |
7ca647bd JP |
5480 | hwstats->pxonrxc[i] += |
5481 | IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); | |
5482 | hwstats->pxoffrxc[i] += | |
5483 | IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); | |
e8e26350 | 5484 | } |
7ca647bd JP |
5485 | hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); |
5486 | hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); | |
6f11eef7 | 5487 | } |
7ca647bd | 5488 | hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); |
6f11eef7 | 5489 | /* work around hardware counting issue */ |
7ca647bd | 5490 | hwstats->gprc -= missed_rx; |
6f11eef7 AV |
5491 | |
5492 | /* 82598 hardware only has a 32 bit counter in the high register */ | |
e8e26350 | 5493 | if (hw->mac.type == ixgbe_mac_82599EB) { |
aad71918 | 5494 | u64 tmp; |
7ca647bd | 5495 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); |
e8e9f696 JP |
5496 | tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; |
5497 | /* 4 high bits of GORC */ | |
7ca647bd JP |
5498 | hwstats->gorc += (tmp << 32); |
5499 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); | |
e8e9f696 JP |
5500 | tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; |
5501 | /* 4 high bits of GOTC */ | |
7ca647bd JP |
5502 | hwstats->gotc += (tmp << 32); |
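		/*
		 * Note: on 82599 GORC and GOTC are 36-bit octet counters;
		 * the low 32 bits come from GORCL/GOTCL and only the bottom
		 * 4 bits of GORCH/GOTCH are significant, hence the & 0xF
		 * masks and the shifts by 32 above.
		 */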
5503 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); | |
e8e9f696 | 5504 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ |
7ca647bd JP |
5505 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); |
5506 | hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); | |
5507 | hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); | |
5508 | hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); | |
6d45522c | 5509 | #ifdef IXGBE_FCOE |
7ca647bd JP |
5510 | hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); |
5511 | hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); | |
5512 | hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); | |
5513 | hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); | |
5514 | hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); | |
5515 | hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); | |
6d45522c | 5516 | #endif /* IXGBE_FCOE */ |
e8e26350 | 5517 | } else { |
7ca647bd JP |
5518 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); |
5519 | hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); | |
5520 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); | |
5521 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); | |
5522 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); | |
e8e26350 | 5523 | } |
9a799d71 | 5524 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); |
7ca647bd JP |
5525 | hwstats->bprc += bprc; |
5526 | hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); | |
e8e26350 | 5527 | if (hw->mac.type == ixgbe_mac_82598EB) |
7ca647bd JP |
5528 | hwstats->mprc -= bprc; |
5529 | hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); | |
5530 | hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); | |
5531 | hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); | |
5532 | hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); | |
5533 | hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); | |
5534 | hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); | |
5535 | hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); | |
5536 | hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); | |
6f11eef7 | 5537 | lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); |
7ca647bd | 5538 | hwstats->lxontxc += lxon; |
6f11eef7 | 5539 | lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); |
7ca647bd JP |
5540 | hwstats->lxofftxc += lxoff; |
5541 | hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); | |
5542 | hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); | |
5543 | hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); | |
6f11eef7 AV |
5544 | /* |
5545 | * 82598 errata - tx of flow control packets is included in tx counters | |
5546 | */ | |
5547 | xon_off_tot = lxon + lxoff; | |
7ca647bd JP |
5548 | hwstats->gptc -= xon_off_tot; |
5549 | hwstats->mptc -= xon_off_tot; | |
5550 | hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); | |
5551 | hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); | |
5552 | hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); | |
5553 | hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); | |
5554 | hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); | |
5555 | hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); | |
5556 | hwstats->ptc64 -= xon_off_tot; | |
5557 | hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); | |
5558 | hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); | |
5559 | hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); | |
5560 | hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); | |
5561 | hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); | |
5562 | hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); | |
9a799d71 AK |
5563 | |
5564 | /* Fill out the OS statistics structure */ | |
7ca647bd | 5565 | netdev->stats.multicast = hwstats->mprc; |
9a799d71 AK |
5566 | |
5567 | /* Rx Errors */ | |
7ca647bd | 5568 | netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; |
2d86f139 | 5569 | netdev->stats.rx_dropped = 0; |
7ca647bd JP |
5570 | netdev->stats.rx_length_errors = hwstats->rlec; |
5571 | netdev->stats.rx_crc_errors = hwstats->crcerrs; | |
2d86f139 | 5572 | netdev->stats.rx_missed_errors = total_mpc; |
9a799d71 AK |
5573 | } |
5574 | ||
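The 82599 byte counters read above (GORC/GOTC) are 36 bits wide, split between a 32-bit low register and the 4 low bits of a high register; ixgbe_update_stats() stitches the halves back together before accumulating. A minimal user-space sketch of that reconstruction, with made-up register values instead of real MMIO reads:

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 36-bit counter from its register halves, mirroring the
 * GORCL/GORCH handling above: only bits 3:0 of the high register count. */
static uint64_t rebuild_36bit(uint32_t lo, uint32_t hi)
{
        return (uint64_t)lo | ((uint64_t)(hi & 0xF) << 32);
}

int main(void)
{
        uint32_t gorcl = 0xFFFFFFF0, gorch = 0x7;       /* hypothetical snapshot */

        printf("gorc = %llu bytes\n",
               (unsigned long long)rebuild_36bit(gorcl, gorch));
        return 0;
}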
5575 | /** | |
5576 | * ixgbe_watchdog - Timer Call-back | |
5577 | * @data: pointer to adapter cast into an unsigned long | |
5578 | **/ | |
5579 | static void ixgbe_watchdog(unsigned long data) | |
5580 | { | |
5581 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | |
cf8280ee | 5582 | struct ixgbe_hw *hw = &adapter->hw; |
fe49f04a AD |
5583 | u64 eics = 0; |
5584 | int i; | |
cf8280ee | 5585 | |
fe49f04a AD |
5586 | /* |
5587 | * Do the watchdog outside of interrupt context due to the lovely | |
5588 | * delays that some of the newer hardware requires | |
5589 | */ | |
22d5a71b | 5590 | |
fe49f04a AD |
5591 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
5592 | goto watchdog_short_circuit; | |
22d5a71b | 5593 | |
fe49f04a AD |
5594 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { |
5595 | /* | |
5596 | * for legacy and MSI interrupts don't set any bits | |
5597 | * that are enabled for EIAM, because this operation | |
5598 | * would set *both* EIMS and EICS for any bit in EIAM | |
5599 | */ | |
5600 | IXGBE_WRITE_REG(hw, IXGBE_EICS, | |
5601 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); | |
5602 | goto watchdog_reschedule; | |
5603 | } | |
5604 | ||
5605 | /* get one bit for every active tx/rx interrupt vector */ | |
5606 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | |
5607 | struct ixgbe_q_vector *qv = adapter->q_vector[i]; | |
5608 | if (qv->rxr_count || qv->txr_count) | |
5609 | eics |= ((u64)1 << i); | |
cf8280ee | 5610 | } |
9a799d71 | 5611 | |
fe49f04a AD |
5612 | /* Cause software interrupt to ensure rx rings are cleaned */ |
5613 | ixgbe_irq_rearm_queues(adapter, eics); | |
5614 | ||
5615 | watchdog_reschedule: | |
5616 | /* Reset the timer */ | |
5617 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); | |
5618 | ||
5619 | watchdog_short_circuit: | |
cf8280ee JB |
5620 | schedule_work(&adapter->watchdog_task); |
5621 | } | |
5622 | ||
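ixgbe_watchdog() above builds a 64-bit EICS value with one bit set per MSI-X vector that owns at least one Tx or Rx ring, then writes it to force a software interrupt so those rings get cleaned. A stand-alone sketch of that bitmask construction; the vector array and counts here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct fake_q_vector { int rxr_count; int txr_count; };

/* Set bit i for every vector i with queues attached -- the same
 * selection the watchdog uses before writing EICS. */
static uint64_t build_eics(const struct fake_q_vector *qv, int nvec)
{
        uint64_t eics = 0;
        int i;

        for (i = 0; i < nvec; i++)
                if (qv[i].rxr_count || qv[i].txr_count)
                        eics |= (uint64_t)1 << i;
        return eics;
}

int main(void)
{
        struct fake_q_vector qv[4] = { {1, 1}, {0, 0}, {0, 2}, {1, 0} };

        printf("eics mask = 0x%llx\n",
               (unsigned long long)build_eics(qv, 4));  /* prints 0xd */
        return 0;
}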
e8e26350 PW |
5623 | /** |
5624 | * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber | |
5625 | * @work: pointer to work_struct containing our data | |
5626 | **/ | |
5627 | static void ixgbe_multispeed_fiber_task(struct work_struct *work) | |
5628 | { | |
5629 | struct ixgbe_adapter *adapter = container_of(work, | |
e8e9f696 JP |
5630 | struct ixgbe_adapter, |
5631 | multispeed_fiber_task); | |
e8e26350 PW |
5632 | struct ixgbe_hw *hw = &adapter->hw; |
5633 | u32 autoneg; | |
8620a103 | 5634 | bool negotiation; |
e8e26350 PW |
5635 | |
5636 | adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; | |
a1f25324 MC |
5637 | autoneg = hw->phy.autoneg_advertised; |
5638 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | |
8620a103 | 5639 | hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); |
1097cd17 | 5640 | hw->mac.autotry_restart = false; |
8620a103 MC |
5641 | if (hw->mac.ops.setup_link) |
5642 | hw->mac.ops.setup_link(hw, autoneg, negotiation, true); | |
e8e26350 PW |
5643 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
5644 | adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK; | |
5645 | } | |
5646 | ||
5647 | /** | |
5648 | * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module | |
5649 | * @work: pointer to work_struct containing our data | |
5650 | **/ | |
5651 | static void ixgbe_sfp_config_module_task(struct work_struct *work) | |
5652 | { | |
5653 | struct ixgbe_adapter *adapter = container_of(work, | |
e8e9f696 JP |
5654 | struct ixgbe_adapter, |
5655 | sfp_config_module_task); | |
e8e26350 PW |
5656 | struct ixgbe_hw *hw = &adapter->hw; |
5657 | u32 err; | |
5658 | ||
5659 | adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; | |
63d6e1d8 DS |
5660 | |
5661 | /* Time for electrical oscillations to settle down */ | |
5662 | msleep(100); | |
e8e26350 | 5663 | err = hw->phy.ops.identify_sfp(hw); |
63d6e1d8 | 5664 | |
e8e26350 | 5665 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
849c4542 ET |
5666 | e_dev_err("failed to initialize because an unsupported SFP+ " |
5667 | "module type was detected.\n"); | |
5668 | e_dev_err("Reload the driver after installing a supported " | |
5669 | "module.\n"); | |
63d6e1d8 | 5670 | unregister_netdev(adapter->netdev); |
e8e26350 PW |
5671 | return; |
5672 | } | |
5673 | hw->mac.ops.setup_sfp(hw); | |
5674 | ||
8d1c3c07 | 5675 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
e8e26350 PW |
5676 | /* This will also work for DA Twinax connections */ |
5677 | schedule_work(&adapter->multispeed_fiber_task); | |
5678 | adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; | |
5679 | } | |
5680 | ||
c4cf55e5 PWJ |
5681 | /** |
5682 | * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table | |
5683 | * @work: pointer to work_struct containing our data | |
5684 | **/ | |
5685 | static void ixgbe_fdir_reinit_task(struct work_struct *work) | |
5686 | { | |
5687 | struct ixgbe_adapter *adapter = container_of(work, | |
e8e9f696 JP |
5688 | struct ixgbe_adapter, |
5689 | fdir_reinit_task); | |
c4cf55e5 PWJ |
5690 | struct ixgbe_hw *hw = &adapter->hw; |
5691 | int i; | |
5692 | ||
5693 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | |
5694 | for (i = 0; i < adapter->num_tx_queues; i++) | |
5695 | set_bit(__IXGBE_FDIR_INIT_DONE, | |
e8e9f696 | 5696 | &(adapter->tx_ring[i]->reinit_state)); |
c4cf55e5 | 5697 | } else { |
396e799c | 5698 | e_err(probe, "failed to finish FDIR re-initialization, " |
849c4542 | 5699 | "ignored adding FDIR ATR filters\n"); |
c4cf55e5 PWJ |
5700 | } |
5701 | /* Done FDIR Re-initialization, enable transmits */ | |
5702 | netif_tx_start_all_queues(adapter->netdev); | |
5703 | } | |
5704 | ||
10eec955 JF |
5705 | static DEFINE_MUTEX(ixgbe_watchdog_lock); |
5706 | ||
cf8280ee | 5707 | /** |
69888674 AD |
5708 | * ixgbe_watchdog_task - worker thread to bring link up |
5709 | * @work: pointer to work_struct containing our data | |
cf8280ee JB |
5710 | **/ |
5711 | static void ixgbe_watchdog_task(struct work_struct *work) | |
5712 | { | |
5713 | struct ixgbe_adapter *adapter = container_of(work, | |
e8e9f696 JP |
5714 | struct ixgbe_adapter, |
5715 | watchdog_task); | |
cf8280ee JB |
5716 | struct net_device *netdev = adapter->netdev; |
5717 | struct ixgbe_hw *hw = &adapter->hw; | |
10eec955 JF |
5718 | u32 link_speed; |
5719 | bool link_up; | |
bc59fcda NS |
5720 | int i; |
5721 | struct ixgbe_ring *tx_ring; | |
5722 | int some_tx_pending = 0; | |
cf8280ee | 5723 | |
10eec955 JF |
5724 | mutex_lock(&ixgbe_watchdog_lock); |
5725 | ||
5726 | link_up = adapter->link_up; | |
5727 | link_speed = adapter->link_speed; | |
cf8280ee JB |
5728 | |
5729 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { | |
5730 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | |
264857b8 PWJ |
5731 | if (link_up) { |
5732 | #ifdef CONFIG_DCB | |
5733 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | |
5734 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) | |
620fa036 | 5735 | hw->mac.ops.fc_enable(hw, i); |
264857b8 | 5736 | } else { |
620fa036 | 5737 | hw->mac.ops.fc_enable(hw, 0); |
264857b8 PWJ |
5738 | } |
5739 | #else | |
620fa036 | 5740 | hw->mac.ops.fc_enable(hw, 0); |
264857b8 PWJ |
5741 | #endif |
5742 | } | |
5743 | ||
cf8280ee JB |
5744 | if (link_up || |
5745 | time_after(jiffies, (adapter->link_check_timeout + | |
e8e9f696 | 5746 | IXGBE_TRY_LINK_TIMEOUT))) { |
cf8280ee | 5747 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; |
264857b8 | 5748 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); |
cf8280ee JB |
5749 | } |
5750 | adapter->link_up = link_up; | |
5751 | adapter->link_speed = link_speed; | |
5752 | } | |
9a799d71 AK |
5753 | |
5754 | if (link_up) { | |
5755 | if (!netif_carrier_ok(netdev)) { | |
e8e26350 PW |
5756 | bool flow_rx, flow_tx; |
5757 | ||
5758 | if (hw->mac.type == ixgbe_mac_82599EB) { | |
5759 | u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); | |
5760 | u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); | |
078788b6 PWJ |
5761 | flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); |
5762 | flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); | |
e8e26350 PW |
5763 | } else { |
5764 | u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | |
5765 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); | |
078788b6 PWJ |
5766 | flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); |
5767 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); | |
e8e26350 PW |
5768 | } |
5769 | ||
396e799c | 5770 | e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", |
a46e534b | 5771 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? |
849c4542 ET |
5772 | "10 Gbps" : |
5773 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? | |
5774 | "1 Gbps" : "unknown speed")), | |
e8e26350 | 5775 | ((flow_rx && flow_tx) ? "RX/TX" : |
849c4542 ET |
5776 | (flow_rx ? "RX" : |
5777 | (flow_tx ? "TX" : "None")))); | |
9a799d71 AK |
5778 | |
5779 | netif_carrier_on(netdev); | |
9a799d71 AK |
5780 | } else { |
5781 | /* Force detection of hung controller */ | |
5782 | adapter->detect_tx_hung = true; | |
5783 | } | |
5784 | } else { | |
cf8280ee JB |
5785 | adapter->link_up = false; |
5786 | adapter->link_speed = 0; | |
9a799d71 | 5787 | if (netif_carrier_ok(netdev)) { |
396e799c | 5788 | e_info(drv, "NIC Link is Down\n"); |
9a799d71 | 5789 | netif_carrier_off(netdev); |
9a799d71 AK |
5790 | } |
5791 | } | |
5792 | ||
bc59fcda NS |
5793 | if (!netif_carrier_ok(netdev)) { |
5794 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
4a0b9ca0 | 5795 | tx_ring = adapter->tx_ring[i]; |
bc59fcda NS |
5796 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { |
5797 | some_tx_pending = 1; | |
5798 | break; | |
5799 | } | |
5800 | } | |
5801 | ||
5802 | if (some_tx_pending) { | |
5803 | /* We've lost link, so the controller stops DMA, | |
5804 | * but we've got queued Tx work that's never going | |
5805 | * to get done, so reset controller to flush Tx. | |
5806 | * (Do the reset outside of interrupt context). | |
5807 | */ | |
5808 | schedule_work(&adapter->reset_task); | |
5809 | } | |
5810 | } | |
5811 | ||
9a799d71 | 5812 | ixgbe_update_stats(adapter); |
10eec955 | 5813 | mutex_unlock(&ixgbe_watchdog_lock); |
9a799d71 AK |
5814 | } |
5815 | ||
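The link-up message in ixgbe_watchdog_task() folds the negotiated flow-control state into one string with nested ternaries. The same mapping, pulled out into a small helper for readability (plain booleans stand in for the MFLCN/FCCFG register bits):

#include <stdbool.h>
#include <stdio.h>

/* Same four-way mapping the watchdog task prints. */
static const char *fc_string(bool flow_rx, bool flow_tx)
{
        if (flow_rx && flow_tx)
                return "RX/TX";
        if (flow_rx)
                return "RX";
        if (flow_tx)
                return "TX";
        return "None";
}

int main(void)
{
        printf("Flow Control: %s\n", fc_string(true, false));   /* RX */
        return 0;
}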
9a799d71 | 5816 | static int ixgbe_tso(struct ixgbe_adapter *adapter, |
e8e9f696 JP |
5817 | struct ixgbe_ring *tx_ring, struct sk_buff *skb, |
5818 | u32 tx_flags, u8 *hdr_len) | |
9a799d71 AK |
5819 | { |
5820 | struct ixgbe_adv_tx_context_desc *context_desc; | |
5821 | unsigned int i; | |
5822 | int err; | |
5823 | struct ixgbe_tx_buffer *tx_buffer_info; | |
9f8cdf4f JB |
5824 | u32 vlan_macip_lens = 0, type_tucmd_mlhl; |
5825 | u32 mss_l4len_idx, l4len; | |
9a799d71 AK |
5826 | |
5827 | if (skb_is_gso(skb)) { | |
5828 | if (skb_header_cloned(skb)) { | |
5829 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | |
5830 | if (err) | |
5831 | return err; | |
5832 | } | |
5833 | l4len = tcp_hdrlen(skb); | |
5834 | *hdr_len += l4len; | |
5835 | ||
8327d000 | 5836 | if (skb->protocol == htons(ETH_P_IP)) { |
9a799d71 AK |
5837 | struct iphdr *iph = ip_hdr(skb); |
5838 | iph->tot_len = 0; | |
5839 | iph->check = 0; | |
5840 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | |
e8e9f696 JP |
5841 | iph->daddr, 0, |
5842 | IPPROTO_TCP, | |
5843 | 0); | |
8e1e8a47 | 5844 | } else if (skb_is_gso_v6(skb)) { |
9a799d71 AK |
5845 | ipv6_hdr(skb)->payload_len = 0; |
5846 | tcp_hdr(skb)->check = | |
5847 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | |
e8e9f696 JP |
5848 | &ipv6_hdr(skb)->daddr, |
5849 | 0, IPPROTO_TCP, 0); | |
9a799d71 AK |
5850 | } |
5851 | ||
5852 | i = tx_ring->next_to_use; | |
5853 | ||
5854 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | |
31f05a2d | 5855 | context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); |
9a799d71 AK |
5856 | |
5857 | /* VLAN MACLEN IPLEN */ | |
5858 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) | |
5859 | vlan_macip_lens |= | |
5860 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); | |
5861 | vlan_macip_lens |= ((skb_network_offset(skb)) << | |
e8e9f696 | 5862 | IXGBE_ADVTXD_MACLEN_SHIFT); |
9a799d71 AK |
5863 | *hdr_len += skb_network_offset(skb); |
5864 | vlan_macip_lens |= | |
5865 | (skb_transport_header(skb) - skb_network_header(skb)); | |
5866 | *hdr_len += | |
5867 | (skb_transport_header(skb) - skb_network_header(skb)); | |
5868 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | |
5869 | context_desc->seqnum_seed = 0; | |
5870 | ||
5871 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ | |
9f8cdf4f | 5872 | type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | |
e8e9f696 | 5873 | IXGBE_ADVTXD_DTYP_CTXT); |
9a799d71 | 5874 | |
8327d000 | 5875 | if (skb->protocol == htons(ETH_P_IP)) |
9a799d71 AK |
5876 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; |
5877 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; | |
5878 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); | |
5879 | ||
5880 | /* MSS L4LEN IDX */ | |
9f8cdf4f | 5881 | mss_l4len_idx = |
9a799d71 AK |
5882 | (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); |
5883 | mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); | |
4eeae6fd PW |
5884 | /* use index 1 for TSO */ |
5885 | mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); | |
9a799d71 AK |
5886 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); |
5887 | ||
5888 | tx_buffer_info->time_stamp = jiffies; | |
5889 | tx_buffer_info->next_to_watch = i; | |
5890 | ||
5891 | i++; | |
5892 | if (i == tx_ring->count) | |
5893 | i = 0; | |
5894 | tx_ring->next_to_use = i; | |
5895 | ||
5896 | return true; | |
5897 | } | |
5898 | return false; | |
5899 | } | |
5900 | ||
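For TSO, ixgbe_tso() zeroes the IP total length and seeds the TCP checksum field with the complement of csum_tcpudp_magic() computed over the addresses and protocol only (length 0), leaving the per-segment length for the hardware to add. A rough user-space sketch of that pseudo-header seed; csum_tcpudp_magic() is a kernel helper, only approximated here, and the addresses are example values in host byte order:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement sum into 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Pseudo-header sum over saddr, daddr, protocol and length; the TSO
 * path stores the *uninverted* folded sum as the seed (hence the
 * ~csum_tcpudp_magic(...) in the code above). */
static uint16_t pseudo_hdr_seed(uint32_t saddr, uint32_t daddr,
                                uint8_t proto, uint16_t len)
{
        uint32_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += proto;
        sum += len;
        return csum_fold(sum);
}

int main(void)
{
        /* 192.0.2.1 -> 192.0.2.2, TCP (6), length 0 as in the TSO path */
        printf("checksum seed = 0x%04x\n",
               pseudo_hdr_seed(0xC0000201, 0xC0000202, 6, 0));
        return 0;
}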
7ca647bd JP |
5901 | static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) |
5902 | { | |
5903 | u32 rtn = 0; | |
5904 | __be16 protocol; | |
5905 | ||
5906 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) | |
5907 | protocol = ((const struct vlan_ethhdr *)skb->data)-> | |
5908 | h_vlan_encapsulated_proto; | |
5909 | else | |
5910 | protocol = skb->protocol; | |
5911 | ||
5912 | switch (protocol) { | |
5913 | case cpu_to_be16(ETH_P_IP): | |
5914 | rtn |= IXGBE_ADVTXD_TUCMD_IPV4; | |
5915 | switch (ip_hdr(skb)->protocol) { | |
5916 | case IPPROTO_TCP: | |
5917 | rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP; | |
5918 | break; | |
5919 | case IPPROTO_SCTP: | |
5920 | rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; | |
5921 | break; | |
5922 | } | |
5923 | break; | |
5924 | case cpu_to_be16(ETH_P_IPV6): | |
5925 | /* XXX what about other V6 headers?? */ | |
5926 | switch (ipv6_hdr(skb)->nexthdr) { | |
5927 | case IPPROTO_TCP: | |
5928 | rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP; | |
5929 | break; | |
5930 | case IPPROTO_SCTP: | |
5931 | rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; | |
5932 | break; | |
5933 | } | |
5934 | break; | |
5935 | default: | |
5936 | if (unlikely(net_ratelimit())) | |
5937 | e_warn(probe, "partial checksum but proto=%x!\n", | |
5938 | protocol); |
5939 | break; | |
5940 | } | |
5941 | ||
5942 | return rtn; | |
5943 | } | |
5944 | ||
9a799d71 | 5945 | static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, |
e8e9f696 JP |
5946 | struct ixgbe_ring *tx_ring, |
5947 | struct sk_buff *skb, u32 tx_flags) | |
9a799d71 AK |
5948 | { |
5949 | struct ixgbe_adv_tx_context_desc *context_desc; | |
5950 | unsigned int i; | |
5951 | struct ixgbe_tx_buffer *tx_buffer_info; | |
5952 | u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; | |
5953 | ||
5954 | if (skb->ip_summed == CHECKSUM_PARTIAL || | |
5955 | (tx_flags & IXGBE_TX_FLAGS_VLAN)) { | |
5956 | i = tx_ring->next_to_use; | |
5957 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | |
31f05a2d | 5958 | context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); |
9a799d71 AK |
5959 | |
5960 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) | |
5961 | vlan_macip_lens |= | |
5962 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); | |
5963 | vlan_macip_lens |= (skb_network_offset(skb) << | |
e8e9f696 | 5964 | IXGBE_ADVTXD_MACLEN_SHIFT); |
9a799d71 AK |
5965 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
5966 | vlan_macip_lens |= (skb_transport_header(skb) - | |
e8e9f696 | 5967 | skb_network_header(skb)); |
9a799d71 AK |
5968 | |
5969 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | |
5970 | context_desc->seqnum_seed = 0; | |
5971 | ||
5972 | type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | | |
e8e9f696 | 5973 | IXGBE_ADVTXD_DTYP_CTXT); |
9a799d71 | 5974 | |
7ca647bd JP |
5975 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
5976 | type_tucmd_mlhl |= ixgbe_psum(adapter, skb); | |
9a799d71 AK |
5977 | |
5978 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); | |
4eeae6fd | 5979 | /* use index zero for tx checksum offload */ |
9a799d71 AK |
5980 | context_desc->mss_l4len_idx = 0; |
5981 | ||
5982 | tx_buffer_info->time_stamp = jiffies; | |
5983 | tx_buffer_info->next_to_watch = i; | |
9f8cdf4f | 5984 | |
9a799d71 AK |
5985 | i++; |
5986 | if (i == tx_ring->count) | |
5987 | i = 0; | |
5988 | tx_ring->next_to_use = i; | |
5989 | ||
5990 | return true; | |
5991 | } | |
9f8cdf4f | 5992 | |
9a799d71 AK |
5993 | return false; |
5994 | } | |
5995 | ||
5996 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |
e8e9f696 JP |
5997 | struct ixgbe_ring *tx_ring, |
5998 | struct sk_buff *skb, u32 tx_flags, | |
5999 | unsigned int first) | |
9a799d71 | 6000 | { |
e5a43549 | 6001 | struct pci_dev *pdev = adapter->pdev; |
9a799d71 | 6002 | struct ixgbe_tx_buffer *tx_buffer_info; |
eacd73f7 YZ |
6003 | unsigned int len; |
6004 | unsigned int total = skb->len; | |
9a799d71 AK |
6005 | unsigned int offset = 0, size, count = 0, i; |
6006 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | |
6007 | unsigned int f; | |
9a799d71 AK |
6008 | |
6009 | i = tx_ring->next_to_use; | |
6010 | ||
eacd73f7 YZ |
6011 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) |
6012 | /* excluding fcoe_crc_eof for FCoE */ | |
6013 | total -= sizeof(struct fcoe_crc_eof); | |
6014 | ||
6015 | len = min(skb_headlen(skb), total); | |
9a799d71 AK |
6016 | while (len) { |
6017 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | |
6018 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); | |
6019 | ||
6020 | tx_buffer_info->length = size; | |
e5a43549 | 6021 | tx_buffer_info->mapped_as_page = false; |
1b507730 | 6022 | tx_buffer_info->dma = dma_map_single(&pdev->dev, |
e5a43549 | 6023 | skb->data + offset, |
1b507730 NN |
6024 | size, DMA_TO_DEVICE); |
6025 | if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) | |
e5a43549 | 6026 | goto dma_error; |
9a799d71 AK |
6027 | tx_buffer_info->time_stamp = jiffies; |
6028 | tx_buffer_info->next_to_watch = i; | |
6029 | ||
6030 | len -= size; | |
eacd73f7 | 6031 | total -= size; |
9a799d71 AK |
6032 | offset += size; |
6033 | count++; | |
44df32c5 AD |
6034 | |
6035 | if (len) { | |
6036 | i++; | |
6037 | if (i == tx_ring->count) | |
6038 | i = 0; | |
6039 | } | |
9a799d71 AK |
6040 | } |
6041 | ||
6042 | for (f = 0; f < nr_frags; f++) { | |
6043 | struct skb_frag_struct *frag; | |
6044 | ||
6045 | frag = &skb_shinfo(skb)->frags[f]; | |
eacd73f7 | 6046 | len = min((unsigned int)frag->size, total); |
e5a43549 | 6047 | offset = frag->page_offset; |
9a799d71 AK |
6048 | |
6049 | while (len) { | |
44df32c5 AD |
6050 | i++; |
6051 | if (i == tx_ring->count) | |
6052 | i = 0; | |
6053 | ||
9a799d71 AK |
6054 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
6055 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); | |
6056 | ||
6057 | tx_buffer_info->length = size; | |
1b507730 | 6058 | tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, |
e5a43549 AD |
6059 | frag->page, |
6060 | offset, size, | |
1b507730 | 6061 | DMA_TO_DEVICE); |
e5a43549 | 6062 | tx_buffer_info->mapped_as_page = true; |
1b507730 | 6063 | if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) |
e5a43549 | 6064 | goto dma_error; |
9a799d71 AK |
6065 | tx_buffer_info->time_stamp = jiffies; |
6066 | tx_buffer_info->next_to_watch = i; | |
6067 | ||
6068 | len -= size; | |
eacd73f7 | 6069 | total -= size; |
9a799d71 AK |
6070 | offset += size; |
6071 | count++; | |
9a799d71 | 6072 | } |
eacd73f7 YZ |
6073 | if (total == 0) |
6074 | break; | |
9a799d71 | 6075 | } |
44df32c5 | 6076 | |
9a799d71 AK |
6077 | tx_ring->tx_buffer_info[i].skb = skb; |
6078 | tx_ring->tx_buffer_info[first].next_to_watch = i; | |
6079 | ||
e5a43549 AD |
6080 | return count; |
6081 | ||
6082 | dma_error: | |
849c4542 | 6083 | e_dev_err("TX DMA map failed\n"); |
e5a43549 AD |
6084 | |
6085 | /* clear timestamp and dma mappings for failed tx_buffer_info map */ | |
6086 | tx_buffer_info->dma = 0; | |
6087 | tx_buffer_info->time_stamp = 0; | |
6088 | tx_buffer_info->next_to_watch = 0; | |
c1fa347f RK |
6089 | if (count) |
6090 | count--; | |
e5a43549 AD |
6091 | |
6092 | /* clear timestamp and dma mappings for remaining portion of packet */ | |
c1fa347f | 6093 | while (count--) { |
e8e9f696 | 6094 | if (i == 0) |
e5a43549 | 6095 | i += tx_ring->count; |
c1fa347f | 6096 | i--; |
e5a43549 AD |
6097 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
6098 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | |
6099 | } | |
6100 | ||
e44d38e1 | 6101 | return 0; |
9a799d71 AK |
6102 | } |
6103 | ||
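When a DMA mapping fails partway through ixgbe_tx_map(), the dma_error path walks back over the buffers already mapped, wrapping the ring index downwards, and unmaps each one. A self-contained sketch of that backwards ring walk; the ring size and the unmap step are stand-ins for the driver structures:

#include <stdio.h>

#define RING_SIZE 8

/* Visit the last 'count' slots before index i, newest first, wrapping
 * at 0 -- the shape of the dma_error unwind loop above. */
static void unwind(unsigned int i, unsigned int count)
{
        while (count--) {
                if (i == 0)
                        i += RING_SIZE;
                i--;
                printf("unmap slot %u\n", i);   /* stand-in for unmap+free */
        }
}

int main(void)
{
        unwind(1, 3);   /* three buffers ending before slot 1: frees 0, 7, 6 */
        return 0;
}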
6104 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |
e8e9f696 JP |
6105 | struct ixgbe_ring *tx_ring, |
6106 | int tx_flags, int count, u32 paylen, u8 hdr_len) | |
9a799d71 AK |
6107 | { |
6108 | union ixgbe_adv_tx_desc *tx_desc = NULL; | |
6109 | struct ixgbe_tx_buffer *tx_buffer_info; | |
6110 | u32 olinfo_status = 0, cmd_type_len = 0; | |
6111 | unsigned int i; | |
6112 | u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; | |
6113 | ||
6114 | cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; | |
6115 | ||
6116 | cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; | |
6117 | ||
6118 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) | |
6119 | cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; | |
6120 | ||
6121 | if (tx_flags & IXGBE_TX_FLAGS_TSO) { | |
6122 | cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; | |
6123 | ||
6124 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << | |
e8e9f696 | 6125 | IXGBE_ADVTXD_POPTS_SHIFT; |
9a799d71 | 6126 | |
4eeae6fd PW |
6127 | /* use index 1 context for tso */ |
6128 | olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); | |
9a799d71 AK |
6129 | if (tx_flags & IXGBE_TX_FLAGS_IPV4) |
6130 | olinfo_status |= IXGBE_TXD_POPTS_IXSM << | |
e8e9f696 | 6131 | IXGBE_ADVTXD_POPTS_SHIFT; |
9a799d71 AK |
6132 | |
6133 | } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) | |
6134 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << | |
e8e9f696 | 6135 | IXGBE_ADVTXD_POPTS_SHIFT; |
9a799d71 | 6136 | |
eacd73f7 YZ |
6137 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { |
6138 | olinfo_status |= IXGBE_ADVTXD_CC; | |
6139 | olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); | |
6140 | if (tx_flags & IXGBE_TX_FLAGS_FSO) | |
6141 | cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; | |
6142 | } | |
6143 | ||
9a799d71 AK |
6144 | olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); |
6145 | ||
6146 | i = tx_ring->next_to_use; | |
6147 | while (count--) { | |
6148 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | |
31f05a2d | 6149 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); |
9a799d71 AK |
6150 | tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); |
6151 | tx_desc->read.cmd_type_len = | |
e8e9f696 | 6152 | cpu_to_le32(cmd_type_len | tx_buffer_info->length); |
9a799d71 | 6153 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
9a799d71 AK |
6154 | i++; |
6155 | if (i == tx_ring->count) | |
6156 | i = 0; | |
6157 | } | |
6158 | ||
6159 | tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); | |
6160 | ||
6161 | /* | |
6162 | * Force memory writes to complete before letting h/w | |
6163 | * know there are new descriptors to fetch. (Only | |
6164 | * applicable for weak-ordered memory model archs, | |
6165 | * such as IA-64). | |
6166 | */ | |
6167 | wmb(); | |
6168 | ||
6169 | tx_ring->next_to_use = i; | |
6170 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | |
6171 | } | |
6172 | ||
c4cf55e5 | 6173 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, |
e8e9f696 | 6174 | int queue, u32 tx_flags) |
c4cf55e5 | 6175 | { |
c4cf55e5 PWJ |
6176 | struct ixgbe_atr_input atr_input; |
6177 | struct tcphdr *th; | |
c4cf55e5 PWJ |
6178 | struct iphdr *iph = ip_hdr(skb); |
6179 | struct ethhdr *eth = (struct ethhdr *)skb->data; | |
6180 | u16 vlan_id, src_port, dst_port, flex_bytes; | |
6181 | u32 src_ipv4_addr, dst_ipv4_addr; | |
6182 | u8 l4type = 0; | |
6183 | ||
d3ead241 GG |
6184 | /* Right now, we support IPv4 only */ |
6185 | if (skb->protocol != htons(ETH_P_IP)) | |
6186 | return; | |
c4cf55e5 PWJ |
6187 | /* check if we're UDP or TCP */ |
6188 | if (iph->protocol == IPPROTO_TCP) { | |
6189 | th = tcp_hdr(skb); | |
6190 | src_port = th->source; | |
6191 | dst_port = th->dest; | |
6192 | l4type |= IXGBE_ATR_L4TYPE_TCP; | |
6193 | /* l4type IPv4 type is 0, no need to assign */ | |
c4cf55e5 PWJ |
6194 | } else { |
6195 | /* Unsupported L4 header, just bail here */ | |
6196 | return; | |
6197 | } | |
6198 | ||
6199 | memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); | |
6200 | ||
6201 | vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> | |
e8e9f696 | 6202 | IXGBE_TX_FLAGS_VLAN_SHIFT; |
c4cf55e5 PWJ |
6203 | src_ipv4_addr = iph->saddr; |
6204 | dst_ipv4_addr = iph->daddr; | |
6205 | flex_bytes = eth->h_proto; | |
6206 | ||
6207 | ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); | |
6208 | ixgbe_atr_set_src_port_82599(&atr_input, dst_port); | |
6209 | ixgbe_atr_set_dst_port_82599(&atr_input, src_port); | |
6210 | ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); | |
6211 | ixgbe_atr_set_l4type_82599(&atr_input, l4type); | |
6212 | /* src and dst are inverted, think how the receiver sees them */ | |
6213 | ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); | |
6214 | ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); | |
6215 | ||
6216 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ | |
6217 | ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); | |
6218 | } | |
6219 | ||
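ixgbe_atr() programs an 82599 flow-director signature filter from the TCP/IPv4 header being transmitted, but with source and destination deliberately swapped, since the filter has to match the returning packets of the same flow on receive. A tiny sketch of that inversion; the tuple type is invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct flow_tuple {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
};

/* Build the filter key from a transmitted packet's tuple, swapping the
 * endpoints so it matches the peer's replies. */
static struct flow_tuple atr_filter_key(struct flow_tuple tx)
{
        struct flow_tuple key = {
                .saddr = tx.daddr, .daddr = tx.saddr,
                .sport = tx.dport, .dport = tx.sport,
        };
        return key;
}

int main(void)
{
        struct flow_tuple tx = { 0x0a000001, 0x0a000002, 12345, 80 };
        struct flow_tuple key = atr_filter_key(tx);

        printf("filter matches %x:%u -> %x:%u\n",
               key.saddr, key.sport, key.daddr, key.dport);
        return 0;
}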
e092be60 | 6220 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, |
e8e9f696 | 6221 | struct ixgbe_ring *tx_ring, int size) |
e092be60 | 6222 | { |
30eba97a | 6223 | netif_stop_subqueue(netdev, tx_ring->queue_index); |
e092be60 AV |
6224 | /* Herbert's original patch had: |
6225 | * smp_mb__after_netif_stop_queue(); | |
6226 | * but since that doesn't exist yet, just open code it. */ | |
6227 | smp_mb(); | |
6228 | ||
6229 | /* We need to check again in case another CPU has just |
6230 | * made room available. */ | |
6231 | if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) | |
6232 | return -EBUSY; | |
6233 | ||
6234 | /* A reprieve! - use start_queue because it doesn't call schedule */ | |
af72166f | 6235 | netif_start_subqueue(netdev, tx_ring->queue_index); |
7ca3bc58 | 6236 | ++tx_ring->restart_queue; |
e092be60 AV |
6237 | return 0; |
6238 | } | |
6239 | ||
6240 | static int ixgbe_maybe_stop_tx(struct net_device *netdev, | |
e8e9f696 | 6241 | struct ixgbe_ring *tx_ring, int size) |
e092be60 AV |
6242 | { |
6243 | if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) | |
6244 | return 0; | |
6245 | return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); | |
6246 | } | |
6247 | ||
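__ixgbe_maybe_stop_tx() stops the subqueue first, issues a full memory barrier, and only then re-checks the free-descriptor count, so a cleanup that freed space in between cannot leave the queue stopped forever. A single-threaded sketch of that decision logic; the ring accounting is simplified and the real code relies on smp_mb() for the race:

#include <stdbool.h>
#include <stdio.h>

struct fake_ring { unsigned int unused_desc; bool stopped; };

/* Returns true if the caller must back off (queue left stopped). */
static bool maybe_stop_tx(struct fake_ring *ring, unsigned int needed)
{
        if (ring->unused_desc >= needed)
                return false;           /* plenty of room, keep going */

        ring->stopped = true;           /* netif_stop_subqueue() */
        /* smp_mb() here in the driver, then re-check: a cleaner may have
         * just replenished unused_desc. */
        if (ring->unused_desc < needed)
                return true;            /* the -EBUSY path */

        ring->stopped = false;          /* netif_start_subqueue() */
        return false;
}

int main(void)
{
        struct fake_ring ring = { .unused_desc = 4, .stopped = false };

        printf("busy = %d\n", maybe_stop_tx(&ring, 10));        /* busy = 1 */
        return 0;
}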
09a3b1f8 SH |
6248 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) |
6249 | { | |
6250 | struct ixgbe_adapter *adapter = netdev_priv(dev); | |
5f715823 | 6251 | int txq = smp_processor_id(); |
09a3b1f8 | 6252 | |
56075a98 JF |
6253 | #ifdef IXGBE_FCOE |
6254 | if ((skb->protocol == htons(ETH_P_FCOE)) || | |
6255 | (skb->protocol == htons(ETH_P_FIP))) { | |
6256 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | |
6257 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); | |
6258 | txq += adapter->ring_feature[RING_F_FCOE].mask; | |
6259 | return txq; | |
4bc091d8 | 6260 | #ifdef CONFIG_IXGBE_DCB |
56075a98 JF |
6261 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
6262 | txq = adapter->fcoe.up; | |
6263 | return txq; | |
4bc091d8 | 6264 | #endif |
56075a98 JF |
6265 | } |
6266 | } | |
6267 | #endif | |
6268 | ||
fdd3d631 KK |
6269 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
6270 | while (unlikely(txq >= dev->real_num_tx_queues)) | |
6271 | txq -= dev->real_num_tx_queues; | |
5f715823 | 6272 | return txq; |
fdd3d631 | 6273 | } |
c4cf55e5 | 6274 | |
2ea186ae JF |
6275 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
6276 | if (skb->priority == TC_PRIO_CONTROL) | |
6277 | txq = adapter->ring_feature[RING_F_DCB].indices-1; | |
6278 | else | |
6279 | txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) | |
6280 | >> 13; | |
6281 | return txq; | |
6282 | } | |
09a3b1f8 SH |
6283 | |
6284 | return skb_tx_hash(dev, skb); | |
6285 | } | |
6286 | ||
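With flow-director hashing enabled, ixgbe_select_queue() starts from the current CPU id and folds it into the valid queue range by repeated subtraction rather than a modulo. A stand-alone sketch of that fold with example queue counts:

#include <stdio.h>

/* Fold an arbitrary index into [0, nqueues) the way the FDIR branch
 * does, by subtracting until it fits. */
static unsigned int fold_txq(unsigned int txq, unsigned int nqueues)
{
        while (txq >= nqueues)
                txq -= nqueues;
        return txq;
}

int main(void)
{
        printf("cpu 13 on 8 queues -> txq %u\n", fold_txq(13, 8));      /* 5 */
        return 0;
}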
84418e3b AD |
6287 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev, |
6288 | struct ixgbe_adapter *adapter, | |
6289 | struct ixgbe_ring *tx_ring) | |
9a799d71 | 6290 | { |
60d51134 | 6291 | struct netdev_queue *txq; |
9a799d71 AK |
6292 | unsigned int first; |
6293 | unsigned int tx_flags = 0; | |
30eba97a | 6294 | u8 hdr_len = 0; |
5f715823 | 6295 | int tso; |
9a799d71 AK |
6296 | int count = 0; |
6297 | unsigned int f; | |
9f8cdf4f | 6298 | |
eab6d18d | 6299 | if (vlan_tx_tag_present(skb)) { |
9f8cdf4f | 6300 | tx_flags |= vlan_tx_tag_get(skb); |
2f90b865 AD |
6301 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
6302 | tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; | |
5f715823 | 6303 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); |
2f90b865 AD |
6304 | } |
6305 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | |
6306 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | |
33c66bd1 JF |
6307 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && |
6308 | skb->priority != TC_PRIO_CONTROL) { | |
2ea186ae JF |
6309 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); |
6310 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | |
6311 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | |
9a799d71 | 6312 | } |
eacd73f7 | 6313 | |
09ad1cc0 | 6314 | #ifdef IXGBE_FCOE |
56075a98 JF |
6315 | /* for FCoE with DCB, we force the priority to what |
6316 | * was specified by the switch */ | |
6317 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && | |
6318 | (skb->protocol == htons(ETH_P_FCOE) || | |
6319 | skb->protocol == htons(ETH_P_FIP))) { | |
4bc091d8 JF |
6320 | #ifdef CONFIG_IXGBE_DCB |
6321 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | |
6322 | tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK | |
6323 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | |
6324 | tx_flags |= ((adapter->fcoe.up << 13) | |
6325 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | |
6326 | } | |
6327 | #endif | |
ca77cd59 RL |
6328 | /* flag for FCoE offloads */ |
6329 | if (skb->protocol == htons(ETH_P_FCOE)) | |
6330 | tx_flags |= IXGBE_TX_FLAGS_FCOE; | |
09ad1cc0 | 6331 | } |
ca77cd59 RL |
6332 | #endif |
6333 | ||
eacd73f7 | 6334 | /* four things can cause us to need a context descriptor */ |
9f8cdf4f JB |
6335 | if (skb_is_gso(skb) || |
6336 | (skb->ip_summed == CHECKSUM_PARTIAL) || | |
eacd73f7 YZ |
6337 | (tx_flags & IXGBE_TX_FLAGS_VLAN) || |
6338 | (tx_flags & IXGBE_TX_FLAGS_FCOE)) | |
9a799d71 AK |
6339 | count++; |
6340 | ||
9f8cdf4f JB |
6341 | count += TXD_USE_COUNT(skb_headlen(skb)); |
6342 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | |
9a799d71 AK |
6343 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); |
6344 | ||
e092be60 | 6345 | if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { |
9a799d71 | 6346 | adapter->tx_busy++; |
9a799d71 AK |
6347 | return NETDEV_TX_BUSY; |
6348 | } | |
9a799d71 | 6349 | |
9a799d71 | 6350 | first = tx_ring->next_to_use; |
eacd73f7 YZ |
6351 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { |
6352 | #ifdef IXGBE_FCOE | |
6353 | /* setup tx offload for FCoE */ | |
6354 | tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len); | |
6355 | if (tso < 0) { | |
6356 | dev_kfree_skb_any(skb); | |
6357 | return NETDEV_TX_OK; | |
6358 | } | |
6359 | if (tso) | |
6360 | tx_flags |= IXGBE_TX_FLAGS_FSO; | |
6361 | #endif /* IXGBE_FCOE */ | |
6362 | } else { | |
6363 | if (skb->protocol == htons(ETH_P_IP)) | |
6364 | tx_flags |= IXGBE_TX_FLAGS_IPV4; | |
6365 | tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); | |
6366 | if (tso < 0) { | |
6367 | dev_kfree_skb_any(skb); | |
6368 | return NETDEV_TX_OK; | |
6369 | } | |
9a799d71 | 6370 | |
eacd73f7 YZ |
6371 | if (tso) |
6372 | tx_flags |= IXGBE_TX_FLAGS_TSO; | |
6373 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && | |
6374 | (skb->ip_summed == CHECKSUM_PARTIAL)) | |
6375 | tx_flags |= IXGBE_TX_FLAGS_CSUM; | |
6376 | } | |
9a799d71 | 6377 | |
eacd73f7 | 6378 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); |
44df32c5 | 6379 | if (count) { |
c4cf55e5 PWJ |
6380 | /* add the ATR filter if ATR is on */ |
6381 | if (tx_ring->atr_sample_rate) { | |
6382 | ++tx_ring->atr_count; | |
6383 | if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && | |
e8e9f696 JP |
6384 | test_bit(__IXGBE_FDIR_INIT_DONE, |
6385 | &tx_ring->reinit_state)) { | |
c4cf55e5 | 6386 | ixgbe_atr(adapter, skb, tx_ring->queue_index, |
e8e9f696 | 6387 | tx_flags); |
c4cf55e5 PWJ |
6388 | tx_ring->atr_count = 0; |
6389 | } | |
6390 | } | |
60d51134 ED |
6391 | txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); |
6392 | txq->tx_bytes += skb->len; | |
6393 | txq->tx_packets++; | |
44df32c5 | 6394 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, |
e8e9f696 | 6395 | hdr_len); |
44df32c5 | 6396 | ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); |
9a799d71 | 6397 | |
44df32c5 AD |
6398 | } else { |
6399 | dev_kfree_skb_any(skb); | |
6400 | tx_ring->tx_buffer_info[first].time_stamp = 0; | |
6401 | tx_ring->next_to_use = first; | |
6402 | } | |
9a799d71 AK |
6403 | |
6404 | return NETDEV_TX_OK; | |
6405 | } | |
6406 | ||
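Before reserving ring space, ixgbe_xmit_frame_ring() estimates the descriptor count for the skb: at most one context descriptor plus one data descriptor per maximum-sized chunk of the linear head and of each fragment. A simplified version of that count; the 16K per-descriptor limit stands in for IXGBE_MAX_DATA_PER_TXD and the fragment sizes are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD 16384U         /* assumed per-descriptor payload limit */

/* Descriptors needed for 'len' bytes: ceil(len / MAX_DATA_PER_TXD). */
static unsigned int txd_use_count(unsigned int len)
{
        return (len + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
}

int main(void)
{
        unsigned int headlen = 1500, frags[] = { 32768, 4096 };
        bool needs_context = true;      /* TSO, checksum, VLAN or FCoE */
        unsigned int count = needs_context ? 1 : 0;
        unsigned int i;

        count += txd_use_count(headlen);
        for (i = 0; i < sizeof(frags) / sizeof(frags[0]); i++)
                count += txd_use_count(frags[i]);

        printf("descriptors needed: %u\n", count);      /* 1 + 1 + 2 + 1 = 5 */
        return 0;
}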
84418e3b AD |
6407 | static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
6408 | { | |
6409 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
6410 | struct ixgbe_ring *tx_ring; | |
6411 | ||
6412 | tx_ring = adapter->tx_ring[skb->queue_mapping]; | |
6413 | return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring); | |
6414 | } | |
6415 | ||
9a799d71 AK |
6416 | /** |
6417 | * ixgbe_set_mac - Change the Ethernet Address of the NIC | |
6418 | * @netdev: network interface device structure | |
6419 | * @p: pointer to an address structure | |
6420 | * | |
6421 | * Returns 0 on success, negative on failure | |
6422 | **/ | |
6423 | static int ixgbe_set_mac(struct net_device *netdev, void *p) | |
6424 | { | |
6425 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
b4617240 | 6426 | struct ixgbe_hw *hw = &adapter->hw; |
9a799d71 AK |
6427 | struct sockaddr *addr = p; |
6428 | ||
6429 | if (!is_valid_ether_addr(addr->sa_data)) | |
6430 | return -EADDRNOTAVAIL; | |
6431 | ||
6432 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
b4617240 | 6433 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
9a799d71 | 6434 | |
1cdd1ec8 GR |
6435 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, |
6436 | IXGBE_RAH_AV); | |
9a799d71 AK |
6437 | |
6438 | return 0; | |
6439 | } | |
6440 | ||
6b73e10d BH |
6441 | static int |
6442 | ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) | |
6443 | { | |
6444 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
6445 | struct ixgbe_hw *hw = &adapter->hw; | |
6446 | u16 value; | |
6447 | int rc; | |
6448 | ||
6449 | if (prtad != hw->phy.mdio.prtad) | |
6450 | return -EINVAL; | |
6451 | rc = hw->phy.ops.read_reg(hw, addr, devad, &value); | |
6452 | if (!rc) | |
6453 | rc = value; | |
6454 | return rc; | |
6455 | } | |
6456 | ||
6457 | static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, | |
6458 | u16 addr, u16 value) | |
6459 | { | |
6460 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
6461 | struct ixgbe_hw *hw = &adapter->hw; | |
6462 | ||
6463 | if (prtad != hw->phy.mdio.prtad) | |
6464 | return -EINVAL; | |
6465 | return hw->phy.ops.write_reg(hw, addr, devad, value); | |
6466 | } | |
6467 | ||
6468 | static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) | |
6469 | { | |
6470 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
6471 | ||
6472 | return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); | |
6473 | } | |
6474 | ||
0365e6e4 PW |
6475 | /** |
6476 | * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding | |
31278e71 | 6477 | * netdev->dev_addrs |
0365e6e4 PW |
6478 | * @netdev: network interface device structure |
6479 | * | |
6480 | * Returns non-zero on failure | |
6481 | **/ | |
6482 | static int ixgbe_add_sanmac_netdev(struct net_device *dev) | |
6483 | { | |
6484 | int err = 0; | |
6485 | struct ixgbe_adapter *adapter = netdev_priv(dev); | |
6486 | struct ixgbe_mac_info *mac = &adapter->hw.mac; | |
6487 | ||
6488 | if (is_valid_ether_addr(mac->san_addr)) { | |
6489 | rtnl_lock(); | |
6490 | err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); | |
6491 | rtnl_unlock(); | |
6492 | } | |
6493 | return err; | |
6494 | } | |
6495 | ||
6496 | /** | |
6497 | * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding |
31278e71 | 6498 | * netdev->dev_addrs |
0365e6e4 PW |
6499 | * @netdev: network interface device structure |
6500 | * | |
6501 | * Returns non-zero on failure | |
6502 | **/ | |
6503 | static int ixgbe_del_sanmac_netdev(struct net_device *dev) | |
6504 | { | |
6505 | int err = 0; | |
6506 | struct ixgbe_adapter *adapter = netdev_priv(dev); | |
6507 | struct ixgbe_mac_info *mac = &adapter->hw.mac; | |
6508 | ||
6509 | if (is_valid_ether_addr(mac->san_addr)) { | |
6510 | rtnl_lock(); | |
6511 | err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); | |
6512 | rtnl_unlock(); | |
6513 | } | |
6514 | return err; | |
6515 | } | |
6516 | ||
9a799d71 AK |
6517 | #ifdef CONFIG_NET_POLL_CONTROLLER |
6518 | /* | |
6519 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
6520 | * without having to re-enable interrupts. It's not called while | |
6521 | * the interrupt routine is executing. | |
6522 | */ | |
6523 | static void ixgbe_netpoll(struct net_device *netdev) | |
6524 | { | |
6525 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
8f9a7167 | 6526 | int i; |
9a799d71 | 6527 | |
1a647bd2 AD |
6528 | /* if interface is down do nothing */ |
6529 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | |
6530 | return; | |
6531 | ||
9a799d71 | 6532 | adapter->flags |= IXGBE_FLAG_IN_NETPOLL; |
8f9a7167 PWJ |
6533 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
6534 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | |
6535 | for (i = 0; i < num_q_vectors; i++) { | |
6536 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | |
6537 | ixgbe_msix_clean_many(0, q_vector); | |
6538 | } | |
6539 | } else { | |
6540 | ixgbe_intr(adapter->pdev->irq, netdev); | |
6541 | } | |
9a799d71 | 6542 | adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; |
9a799d71 AK |
6543 | } |
6544 | #endif | |
6545 | ||
de1036b1 ED |
6546 | static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, |
6547 | struct rtnl_link_stats64 *stats) | |
6548 | { | |
6549 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
6550 | int i; | |
6551 | ||
6552 | /* accurate rx/tx bytes/packets stats */ | |
6553 | dev_txq_stats_fold(netdev, stats); | |
6554 | for (i = 0; i < adapter->num_rx_queues; i++) { | |
6555 | struct ixgbe_ring *ring = adapter->rx_ring[i]; | |
6556 | u64 bytes, packets; | |
6557 | unsigned int start; | |
6558 | ||
6559 | do { | |
6560 | start = u64_stats_fetch_begin_bh(&ring->syncp); | |
6561 | packets = ring->stats.packets; | |
6562 | bytes = ring->stats.bytes; | |
6563 | } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); | |
6564 | stats->rx_packets += packets; | |
6565 | stats->rx_bytes += bytes; | |
6566 | } | |
6567 | ||
6568 | /* following stats updated by ixgbe_watchdog_task() */ | |
6569 | stats->multicast = netdev->stats.multicast; | |
6570 | stats->rx_errors = netdev->stats.rx_errors; | |
6571 | stats->rx_length_errors = netdev->stats.rx_length_errors; | |
6572 | stats->rx_crc_errors = netdev->stats.rx_crc_errors; | |
6573 | stats->rx_missed_errors = netdev->stats.rx_missed_errors; | |
6574 | return stats; | |
6575 | } | |
6576 | ||
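ixgbe_get_stats64() snapshots each ring's 64-bit packet and byte counters inside a u64_stats fetch/retry loop, so a reader on a 32-bit CPU never sees a torn update. A single-threaded sketch of that retry pattern; a plain integer sequence counter stands in for the kernel's u64_stats_sync:

#include <stdint.h>
#include <stdio.h>

struct ring_stats {
        unsigned int seq;               /* odd while a writer is mid-update */
        uint64_t packets, bytes;
};

/* Read a consistent snapshot: retry if a write raced with us. */
static void read_stats(const struct ring_stats *rs,
                       uint64_t *packets, uint64_t *bytes)
{
        unsigned int start;

        do {
                start = rs->seq;        /* u64_stats_fetch_begin_bh() */
                *packets = rs->packets;
                *bytes = rs->bytes;
        } while ((start & 1) || start != rs->seq);      /* ..._fetch_retry_bh() */
}

int main(void)
{
        struct ring_stats rs = { .seq = 2, .packets = 10, .bytes = 15000 };
        uint64_t p, b;

        read_stats(&rs, &p, &b);
        printf("packets=%llu bytes=%llu\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
}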
6577 | ||
0edc3527 | 6578 | static const struct net_device_ops ixgbe_netdev_ops = { |
e8e9f696 | 6579 | .ndo_open = ixgbe_open, |
0edc3527 | 6580 | .ndo_stop = ixgbe_close, |
00829823 | 6581 | .ndo_start_xmit = ixgbe_xmit_frame, |
09a3b1f8 | 6582 | .ndo_select_queue = ixgbe_select_queue, |
e90d400c | 6583 | .ndo_set_rx_mode = ixgbe_set_rx_mode, |
0edc3527 SH |
6584 | .ndo_set_multicast_list = ixgbe_set_rx_mode, |
6585 | .ndo_validate_addr = eth_validate_addr, | |
6586 | .ndo_set_mac_address = ixgbe_set_mac, | |
6587 | .ndo_change_mtu = ixgbe_change_mtu, | |
6588 | .ndo_tx_timeout = ixgbe_tx_timeout, | |
0edc3527 SH |
6589 | .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, |
6590 | .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, | |
6b73e10d | 6591 | .ndo_do_ioctl = ixgbe_ioctl, |
7f01648a GR |
6592 | .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, |
6593 | .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, | |
6594 | .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, | |
6595 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, | |
de1036b1 | 6596 | .ndo_get_stats64 = ixgbe_get_stats64, |
0edc3527 SH |
6597 | #ifdef CONFIG_NET_POLL_CONTROLLER |
6598 | .ndo_poll_controller = ixgbe_netpoll, | |
6599 | #endif | |
332d4a7d YZ |
6600 | #ifdef IXGBE_FCOE |
6601 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, | |
6602 | .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, | |
8450ff8c YZ |
6603 | .ndo_fcoe_enable = ixgbe_fcoe_enable, |
6604 | .ndo_fcoe_disable = ixgbe_fcoe_disable, | |
61a1fa10 | 6605 | .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, |
332d4a7d | 6606 | #endif /* IXGBE_FCOE */ |
0edc3527 SH |
6607 | }; |
6608 | ||
1cdd1ec8 GR |
6609 | static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, |
6610 | const struct ixgbe_info *ii) | |
6611 | { | |
6612 | #ifdef CONFIG_PCI_IOV | |
6613 | struct ixgbe_hw *hw = &adapter->hw; | |
6614 | int err; | |
6615 | ||
6616 | if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs) | |
6617 | return; | |
6618 | ||
6619 | /* The 82599 supports up to 64 VFs per physical function | |
6620 | * but this implementation limits allocation to 63 so that | |
6621 | * basic networking resources are still available to the | |
6622 | * physical function | |
6623 | */ | |
6624 | adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; | |
6625 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; | |
6626 | err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); | |
6627 | if (err) { | |
396e799c | 6628 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); |
1cdd1ec8 GR |
6629 | goto err_novfs; |
6630 | } | |
6631 | /* If call to enable VFs succeeded then allocate memory | |
6632 | * for per VF control structures. | |
6633 | */ | |
6634 | adapter->vfinfo = | |
6635 | kcalloc(adapter->num_vfs, | |
6636 | sizeof(struct vf_data_storage), GFP_KERNEL); | |
6637 | if (adapter->vfinfo) { | |
6638 | /* Now that we're sure SR-IOV is enabled | |
6639 | * and memory allocated set up the mailbox parameters | |
6640 | */ | |
6641 | ixgbe_init_mbx_params_pf(hw); | |
6642 | memcpy(&hw->mbx.ops, ii->mbx_ops, | |
6643 | sizeof(hw->mbx.ops)); | |
6644 | ||
6645 | /* Disable RSC when in SR-IOV mode */ | |
6646 | adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | | |
6647 | IXGBE_FLAG2_RSC_ENABLED); | |
6648 | return; | |
6649 | } | |
6650 | ||
6651 | /* Oh oh */ | |
396e799c ET |
6652 | e_err(probe, "Unable to allocate memory for VF Data Storage - " |
6653 | "SRIOV disabled\n"); | |
1cdd1ec8 GR |
6654 | pci_disable_sriov(adapter->pdev); |
6655 | ||
6656 | err_novfs: | |
6657 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; | |
6658 | adapter->num_vfs = 0; | |
6659 | #endif /* CONFIG_PCI_IOV */ | |
6660 | } | |
6661 | ||
9a799d71 AK |
6662 | /** |
6663 | * ixgbe_probe - Device Initialization Routine | |
6664 | * @pdev: PCI device information struct | |
6665 | * @ent: entry in ixgbe_pci_tbl | |
6666 | * | |
6667 | * Returns 0 on success, negative on failure | |
6668 | * | |
6669 | * ixgbe_probe initializes an adapter identified by a pci_dev structure. | |
6670 | * The OS initialization, configuring of the adapter private structure, | |
6671 | * and a hardware reset occur. | |
6672 | **/ | |
6673 | static int __devinit ixgbe_probe(struct pci_dev *pdev, | |
e8e9f696 | 6674 | const struct pci_device_id *ent) |
9a799d71 AK |
6675 | { |
6676 | struct net_device *netdev; | |
6677 | struct ixgbe_adapter *adapter = NULL; | |
6678 | struct ixgbe_hw *hw; | |
6679 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; | |
9a799d71 AK |
6680 | static int cards_found; |
6681 | int i, err, pci_using_dac; | |
c85a2618 | 6682 | unsigned int indices = num_possible_cpus(); |
eacd73f7 YZ |
6683 | #ifdef IXGBE_FCOE |
6684 | u16 device_caps; | |
6685 | #endif | |
c44ade9e | 6686 | u32 part_num, eec; |
9a799d71 | 6687 | |
bded64a7 AG |
6688 | /* Catch broken hardware that put the wrong VF device ID in |
6689 | * the PCIe SR-IOV capability. | |
6690 | */ | |
6691 | if (pdev->is_virtfn) { | |
6692 | WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", | |
6693 | pci_name(pdev), pdev->vendor, pdev->device); | |
6694 | return -EINVAL; | |
6695 | } | |
6696 | ||
9ce77666 | 6697 | err = pci_enable_device_mem(pdev); |
9a799d71 AK |
6698 | if (err) |
6699 | return err; | |
6700 | ||
1b507730 NN |
6701 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && |
6702 | !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { | |
9a799d71 AK |
6703 | pci_using_dac = 1; |
6704 | } else { | |
1b507730 | 6705 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
9a799d71 | 6706 | if (err) { |
1b507730 NN |
6707 | err = dma_set_coherent_mask(&pdev->dev, |
6708 | DMA_BIT_MASK(32)); | |
9a799d71 | 6709 | if (err) { |
b8bc0421 DC |
6710 | dev_err(&pdev->dev, |
6711 | "No usable DMA configuration, aborting\n"); | |
9a799d71 AK |
6712 | goto err_dma; |
6713 | } | |
6714 | } | |
6715 | pci_using_dac = 0; | |
6716 | } | |
6717 | ||
9ce77666 | 6718 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, |
e8e9f696 | 6719 | IORESOURCE_MEM), ixgbe_driver_name); |
9a799d71 | 6720 | if (err) { |
b8bc0421 DC |
6721 | dev_err(&pdev->dev, |
6722 | "pci_request_selected_regions failed 0x%x\n", err); | |
9a799d71 AK |
6723 | goto err_pci_reg; |
6724 | } | |
6725 | ||
19d5afd4 | 6726 | pci_enable_pcie_error_reporting(pdev); |
6fabd715 | 6727 | |
9a799d71 | 6728 | pci_set_master(pdev); |
fb3b27bc | 6729 | pci_save_state(pdev); |
9a799d71 | 6730 | |
c85a2618 JF |
6731 | if (ii->mac == ixgbe_mac_82598EB) |
6732 | indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); | |
6733 | else | |
6734 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); | |
6735 | ||
6736 | indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); | |
6737 | #ifdef IXGBE_FCOE | |
6738 | indices += min_t(unsigned int, num_possible_cpus(), | |
6739 | IXGBE_MAX_FCOE_INDICES); | |
6740 | #endif | |
c85a2618 | 6741 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); |
9a799d71 AK |
6742 | if (!netdev) { |
6743 | err = -ENOMEM; | |
6744 | goto err_alloc_etherdev; | |
6745 | } | |
6746 | ||
9a799d71 AK |
6747 | SET_NETDEV_DEV(netdev, &pdev->dev); |
6748 | ||
6749 | pci_set_drvdata(pdev, netdev); | |
6750 | adapter = netdev_priv(netdev); | |
6751 | ||
6752 | adapter->netdev = netdev; | |
6753 | adapter->pdev = pdev; | |
6754 | hw = &adapter->hw; | |
6755 | hw->back = adapter; | |
6756 | adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; | |
6757 | ||
05857980 | 6758 | hw->hw_addr = ioremap(pci_resource_start(pdev, 0), |
e8e9f696 | 6759 | pci_resource_len(pdev, 0)); |
9a799d71 AK |
6760 | if (!hw->hw_addr) { |
6761 | err = -EIO; | |
6762 | goto err_ioremap; | |
6763 | } | |
6764 | ||
6765 | for (i = 1; i <= 5; i++) { | |
6766 | if (pci_resource_len(pdev, i) == 0) | |
6767 | continue; | |
6768 | } | |
6769 | ||
0edc3527 | 6770 | netdev->netdev_ops = &ixgbe_netdev_ops; |
9a799d71 | 6771 | ixgbe_set_ethtool_ops(netdev); |
9a799d71 | 6772 | netdev->watchdog_timeo = 5 * HZ; |
9a799d71 AK |
6773 | strcpy(netdev->name, pci_name(pdev)); |
6774 | ||
9a799d71 AK |
6775 | adapter->bd_number = cards_found; |
6776 | ||
9a799d71 AK |
6777 | /* Setup hw api */ |
6778 | memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); | |
021230d4 | 6779 | hw->mac.type = ii->mac; |
9a799d71 | 6780 | |
c44ade9e JB |
6781 | /* EEPROM */ |
6782 | memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); | |
6783 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | |
6784 | /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ | |
6785 | if (!(eec & (1 << 8))) | |
6786 | hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; | |
6787 | ||
6788 | /* PHY */ | |
6789 | memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); | |
c4900be0 | 6790 | hw->phy.sfp_type = ixgbe_sfp_type_unknown; |
6b73e10d BH |
6791 | /* ixgbe_identify_phy_generic will set prtad and mmds properly */ |
6792 | hw->phy.mdio.prtad = MDIO_PRTAD_NONE; | |
6793 | hw->phy.mdio.mmds = 0; | |
6794 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | |
6795 | hw->phy.mdio.dev = netdev; | |
6796 | hw->phy.mdio.mdio_read = ixgbe_mdio_read; | |
6797 | hw->phy.mdio.mdio_write = ixgbe_mdio_write; | |
c4900be0 DS |
6798 | |
6799 | /* set up this timer and work struct before calling get_invariants | |
6800 | * which might start the timer | |
6801 | */ | |
6802 | init_timer(&adapter->sfp_timer); | |
c061b18d | 6803 | adapter->sfp_timer.function = ixgbe_sfp_timer; |
c4900be0 DS |
6804 | adapter->sfp_timer.data = (unsigned long) adapter; |
6805 | ||
6806 | INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); | |
c44ade9e | 6807 | |
e8e26350 PW |
6808 | /* multispeed fiber has its own tasklet, called from GPI SDP1 context */ |
6809 | INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task); | |
6810 | ||
6811 | /* a new SFP+ module arrival, called from GPI SDP2 context */ | |
6812 | INIT_WORK(&adapter->sfp_config_module_task, | |
e8e9f696 | 6813 | ixgbe_sfp_config_module_task); |
e8e26350 | 6814 | |
8ca783ab | 6815 | ii->get_invariants(hw); |
9a799d71 AK |
6816 | |
6817 | /* setup the private structure */ | |
6818 | err = ixgbe_sw_init(adapter); | |
6819 | if (err) | |
6820 | goto err_sw_init; | |
6821 | ||
e86bff0e DS |
6822 | /* Make it possible for the adapter to be woken up via WOL */ |
6823 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | |
6824 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); | |
6825 | ||
bf069c97 DS |
6826 | /* |
6827 | * If there is a fan on this device and it has failed log the | |
6828 | * failure. | |
6829 | */ | |
6830 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | |
6831 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | |
6832 | if (esdp & IXGBE_ESDP_SDP1) | |
396e799c | 6833 | e_crit(probe, "Fan has stopped, replace the adapter\n"); |
bf069c97 DS |
6834 | } |
6835 | ||
c44ade9e | 6836 | /* reset_hw fills in the perm_addr as well */ |
119fc60a | 6837 | hw->phy.reset_if_overtemp = true; |
c44ade9e | 6838 | err = hw->mac.ops.reset_hw(hw); |
119fc60a | 6839 | hw->phy.reset_if_overtemp = false; |
8ca783ab DS |
6840 | if (err == IXGBE_ERR_SFP_NOT_PRESENT && |
6841 | hw->mac.type == ixgbe_mac_82598EB) { | |
6842 | /* | |
6843 | * Start a kernel thread to watch for a module to arrive. | |
6844 | * Only do this for 82598, since 82599 will generate | |
6845 | * interrupts on module arrival. | |
6846 | */ | |
6847 | set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | |
6848 | mod_timer(&adapter->sfp_timer, | |
6849 | round_jiffies(jiffies + (2 * HZ))); | |
6850 | err = 0; | |
6851 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | |
849c4542 ET |
6852 | e_dev_err("failed to initialize because an unsupported SFP+ " |
6853 | "module type was detected.\n"); | |
6854 | e_dev_err("Reload the driver after installing a supported " | |
6855 | "module.\n"); | |
04f165ef PW |
6856 | goto err_sw_init; |
6857 | } else if (err) { | |
849c4542 | 6858 | e_dev_err("HW Init failed: %d\n", err); |
c44ade9e JB |
6859 | goto err_sw_init; |
6860 | } | |
6861 | ||
1cdd1ec8 GR |
6862 | ixgbe_probe_vf(adapter, ii); |
6863 | ||
396e799c | 6864 | netdev->features = NETIF_F_SG | |
e8e9f696 JP |
6865 | NETIF_F_IP_CSUM | |
6866 | NETIF_F_HW_VLAN_TX | | |
6867 | NETIF_F_HW_VLAN_RX | | |
6868 | NETIF_F_HW_VLAN_FILTER; | |
9a799d71 | 6869 | |
e9990a9c | 6870 | netdev->features |= NETIF_F_IPV6_CSUM; |
9a799d71 | 6871 | netdev->features |= NETIF_F_TSO; |
9a799d71 | 6872 | netdev->features |= NETIF_F_TSO6; |
78b6f4ce | 6873 | netdev->features |= NETIF_F_GRO; |
ad31c402 | 6874 | |
45a5ead0 JB |
6875 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) |
6876 | netdev->features |= NETIF_F_SCTP_CSUM; | |
6877 | ||
ad31c402 JK |
6878 | netdev->vlan_features |= NETIF_F_TSO; |
6879 | netdev->vlan_features |= NETIF_F_TSO6; | |
22f32b7a | 6880 | netdev->vlan_features |= NETIF_F_IP_CSUM; |
cd1da503 | 6881 | netdev->vlan_features |= NETIF_F_IPV6_CSUM; |
ad31c402 JK |
6882 | netdev->vlan_features |= NETIF_F_SG; |
6883 | ||
1cdd1ec8 GR |
6884 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
6885 | adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | | |
6886 | IXGBE_FLAG_DCB_ENABLED); | |
2f90b865 AD |
6887 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) |
6888 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | |
6889 | ||
7a6b6f51 | 6890 | #ifdef CONFIG_IXGBE_DCB |
2f90b865 AD |
6891 | netdev->dcbnl_ops = &dcbnl_ops; |
6892 | #endif | |
6893 | ||
eacd73f7 | 6894 | #ifdef IXGBE_FCOE |
0d551589 | 6895 | if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { |
eacd73f7 YZ |
6896 | if (hw->mac.ops.get_device_caps) { |
6897 | hw->mac.ops.get_device_caps(hw, &device_caps); | |
0d551589 YZ |
6898 | if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) |
6899 | adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; | |
eacd73f7 YZ |
6900 | } |
6901 | } | |
5e09d7f6 YZ |
6902 | if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { |
6903 | netdev->vlan_features |= NETIF_F_FCOE_CRC; | |
6904 | netdev->vlan_features |= NETIF_F_FSO; | |
6905 | netdev->vlan_features |= NETIF_F_FCOE_MTU; | |
6906 | } | |
eacd73f7 | 6907 | #endif /* IXGBE_FCOE */ |
7b872a55 | 6908 | if (pci_using_dac) { |
9a799d71 | 6909 | netdev->features |= NETIF_F_HIGHDMA; |
7b872a55 YZ |
6910 | netdev->vlan_features |= NETIF_F_HIGHDMA; |
6911 | } | |
9a799d71 | 6912 | |
0c19d6af | 6913 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
f8212f97 AD |
6914 | netdev->features |= NETIF_F_LRO; |
6915 | ||
9a799d71 | 6916 | /* make sure the EEPROM is good */ |
c44ade9e | 6917 | if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { |
849c4542 | 6918 | e_dev_err("The EEPROM Checksum Is Not Valid\n"); |
9a799d71 AK |
6919 | err = -EIO; |
6920 | goto err_eeprom; | |
6921 | } | |
6922 | ||
6923 | memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); | |
6924 | memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); | |
6925 | ||
c44ade9e | 6926 | if (ixgbe_validate_mac_addr(netdev->perm_addr)) { |
849c4542 | 6927 | e_dev_err("invalid MAC address\n"); |
9a799d71 AK |
6928 | err = -EIO; |
6929 | goto err_eeprom; | |
6930 | } | |
6931 | ||
61fac744 PW |
6932 | /* power down the optics */ |
6933 | if (hw->phy.multispeed_fiber) | |
6934 | hw->mac.ops.disable_tx_laser(hw); | |
6935 | ||
9a799d71 | 6936 | init_timer(&adapter->watchdog_timer); |
c061b18d | 6937 | adapter->watchdog_timer.function = ixgbe_watchdog; |
9a799d71 AK |
6938 | adapter->watchdog_timer.data = (unsigned long)adapter; |
6939 | ||
6940 | INIT_WORK(&adapter->reset_task, ixgbe_reset_task); | |
cf8280ee | 6941 | INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task); |
9a799d71 | 6942 | |
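The two statements above pair a kernel timer with deferred work in the pre-timer_setup() style: the timer callback runs in softirq context and only schedules the work item, which does the heavy lifting in process context. A minimal sketch of that idiom with a hypothetical callback name (the driver's real ixgbe_watchdog is defined elsewhere in this file):

static void example_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/* softirq context: defer the real work to process context */
	schedule_work(&adapter->watchdog_task);

	/* re-arm the timer; the 2 second period is an arbitrary example */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}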
021230d4 AV |
6943 | err = ixgbe_init_interrupt_scheme(adapter); |
6944 | if (err) | |
6945 | goto err_sw_init; | |
9a799d71 | 6946 | |
e8e26350 PW |
6947 | switch (pdev->device) { |
6948 | case IXGBE_DEV_ID_82599_KX4: | |
495dce12 | 6949 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | |
e8e9f696 | 6950 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); |
e8e26350 PW |
6951 | break; |
6952 | default: | |
6953 | adapter->wol = 0; | |
6954 | break; | |
6955 | } | |
e8e26350 PW |
6956 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); |
6957 | ||
04f165ef PW |
6958 | /* pick up the PCI bus settings for reporting later */ |
6959 | hw->mac.ops.get_bus_info(hw); | |
6960 | ||
9a799d71 | 6961 | /* print bus type/speed/width info */ |
849c4542 | 6962 | e_dev_info("(PCI Express:%s:%s) %pM\n", |
e8e9f696 JP |
6963 | (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" : |
6964 | hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" : | |
6965 | "Unknown"), | |
6966 | (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : | |
6967 | hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : | |
6968 | hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : | |
6969 | "Unknown"), | |
6970 | netdev->dev_addr); | |
c44ade9e | 6971 | ixgbe_read_pba_num_generic(hw, &part_num); |
e8e26350 | 6972 | if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) |
849c4542 ET |
6973 | e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " |
6974 | "PBA No: %06x-%03x\n", | |
6975 | hw->mac.type, hw->phy.type, hw->phy.sfp_type, | |
6976 | (part_num >> 8), (part_num & 0xff)); | |
e8e26350 | 6977 | else |
849c4542 ET |
6978 | e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", |
6979 | hw->mac.type, hw->phy.type, | |
6980 | (part_num >> 8), (part_num & 0xff)); | |
9a799d71 | 6981 | |
e8e26350 | 6982 | if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { |
849c4542 ET |
6983 | e_dev_warn("PCI-Express bandwidth available for this card is " |
6984 | "not sufficient for optimal performance.\n"); | |
6985 | e_dev_warn("For optimal performance an x8 PCI-Express slot " | |
6986 | "is required.\n"); | |
0c254d86 AK |
6987 | } |
6988 | ||
34b0368c PWJ |
6989 | /* save off EEPROM version number */ |
6990 | hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version); | |
6991 | ||
9a799d71 | 6992 | /* reset the hardware with the new settings */ |
794caeb2 | 6993 | err = hw->mac.ops.start_hw(hw); |
c44ade9e | 6994 | |
794caeb2 PWJ |
6995 | if (err == IXGBE_ERR_EEPROM_VERSION) { |
6996 | /* We are running on a pre-production device, log a warning */ | |
849c4542 ET |
6997 | e_dev_warn("This device is a pre-production adapter/LOM. " |
6998 | "Please be aware there may be issues associated " | |
6999 | "with your hardware. If you are experiencing " | |
7000 | "problems please contact your Intel or hardware " | |
7001 | "representative who provided you with this " | |
7002 | "hardware.\n"); | |
794caeb2 | 7003 | } |
9a799d71 AK |
7004 | strcpy(netdev->name, "eth%d"); |
7005 | err = register_netdev(netdev); | |
7006 | if (err) | |
7007 | goto err_register; | |
7008 | ||
54386467 JB |
7009 | /* carrier off reporting is important to ethtool even BEFORE open */ |
7010 | netif_carrier_off(netdev); | |
7011 | ||
c4cf55e5 PWJ |
7012 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
7013 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | |
7014 | INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); | |
7015 | ||
119fc60a | 7016 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) |
e8e9f696 JP |
7017 | INIT_WORK(&adapter->check_overtemp_task, |
7018 | ixgbe_check_overtemp_task); | |
5dd2d332 | 7019 | #ifdef CONFIG_IXGBE_DCA |
652f093f | 7020 | if (dca_add_requester(&pdev->dev) == 0) { |
bd0362dd | 7021 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
bd0362dd JC |
7022 | ixgbe_setup_dca(adapter); |
7023 | } | |
7024 | #endif | |
1cdd1ec8 | 7025 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
396e799c | 7026 | e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); |
1cdd1ec8 GR |
7027 | for (i = 0; i < adapter->num_vfs; i++) |
7028 | ixgbe_vf_configuration(pdev, (i | 0x10000000)); | |
7029 | } | |
7030 | ||
0365e6e4 PW |
7031 | /* add san mac addr to netdev */ |
7032 | ixgbe_add_sanmac_netdev(netdev); | |
9a799d71 | 7033 | |
849c4542 | 7034 | e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); |
9a799d71 AK |
7035 | cards_found++; |
7036 | return 0; | |
7037 | ||
7038 | err_register: | |
5eba3699 | 7039 | ixgbe_release_hw_control(adapter); |
7a921c93 | 7040 | ixgbe_clear_interrupt_scheme(adapter); |
9a799d71 AK |
7041 | err_sw_init: |
7042 | err_eeprom: | |
1cdd1ec8 GR |
7043 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
7044 | ixgbe_disable_sriov(adapter); | |
c4900be0 DS |
7045 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); |
7046 | del_timer_sync(&adapter->sfp_timer); | |
7047 | cancel_work_sync(&adapter->sfp_task); | |
e8e26350 PW |
7048 | cancel_work_sync(&adapter->multispeed_fiber_task); |
7049 | cancel_work_sync(&adapter->sfp_config_module_task); | |
9a799d71 AK |
7050 | iounmap(hw->hw_addr); |
7051 | err_ioremap: | |
7052 | free_netdev(netdev); | |
7053 | err_alloc_etherdev: | |
e8e9f696 JP |
7054 | pci_release_selected_regions(pdev, |
7055 | pci_select_bars(pdev, IORESOURCE_MEM)); | |
9a799d71 AK |
7056 | err_pci_reg: |
7057 | err_dma: | |
7058 | pci_disable_device(pdev); | |
7059 | return err; | |
7060 | } | |
7061 | ||
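The error labels at the end of probe above form the usual kernel goto ladder: each failure jumps to the label for the last step that succeeded, so resources are released in reverse order of acquisition. A standalone sketch of the same pattern with hypothetical names, assuming only <linux/pci.h>:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = pci_request_selected_regions(pdev,
			pci_select_bars(pdev, IORESOURCE_MEM), "example");
	if (err)
		goto err_pci_reg;

	/* later setup steps would add their labels above err_pci_reg */

	return 0;

err_pci_reg:
	pci_disable_device(pdev);
	return err;
}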
7062 | /** | |
7063 | * ixgbe_remove - Device Removal Routine | |
7064 | * @pdev: PCI device information struct | |
7065 | * | |
7066 | * ixgbe_remove is called by the PCI subsystem to alert the driver | |
7067 | * that it should release a PCI device. This could be caused by a | |
7068 | * Hot-Plug event, or because the driver is going to be removed from | |
7069 | * memory. | |
7070 | **/ | |
7071 | static void __devexit ixgbe_remove(struct pci_dev *pdev) | |
7072 | { | |
7073 | struct net_device *netdev = pci_get_drvdata(pdev); | |
7074 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
7075 | ||
7076 | set_bit(__IXGBE_DOWN, &adapter->state); | |
c4900be0 DS |
7077 | /* clear the module not found bit to make sure the worker won't |
7078 | * reschedule | |
7079 | */ | |
7080 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | |
9a799d71 AK |
7081 | del_timer_sync(&adapter->watchdog_timer); |
7082 | ||
c4900be0 DS |
7083 | del_timer_sync(&adapter->sfp_timer); |
7084 | cancel_work_sync(&adapter->watchdog_task); | |
7085 | cancel_work_sync(&adapter->sfp_task); | |
e8e26350 PW |
7086 | cancel_work_sync(&adapter->multispeed_fiber_task); |
7087 | cancel_work_sync(&adapter->sfp_config_module_task); | |
c4cf55e5 PWJ |
7088 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
7089 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | |
7090 | cancel_work_sync(&adapter->fdir_reinit_task); | |
9a799d71 AK |
7091 | flush_scheduled_work(); |
7092 | ||
5dd2d332 | 7093 | #ifdef CONFIG_IXGBE_DCA |
bd0362dd JC |
7094 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
7095 | adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; | |
7096 | dca_remove_requester(&pdev->dev); | |
7097 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); | |
7098 | } | |
7099 | ||
7100 | #endif | |
332d4a7d YZ |
7101 | #ifdef IXGBE_FCOE |
7102 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | |
7103 | ixgbe_cleanup_fcoe(adapter); | |
7104 | ||
7105 | #endif /* IXGBE_FCOE */ | |
0365e6e4 PW |
7106 | |
7107 | /* remove the added san mac */ | |
7108 | ixgbe_del_sanmac_netdev(netdev); | |
7109 | ||
c4900be0 DS |
7110 | if (netdev->reg_state == NETREG_REGISTERED) |
7111 | unregister_netdev(netdev); | |
9a799d71 | 7112 | |
1cdd1ec8 GR |
7113 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
7114 | ixgbe_disable_sriov(adapter); | |
7115 | ||
7a921c93 | 7116 | ixgbe_clear_interrupt_scheme(adapter); |
5eba3699 | 7117 | |
021230d4 | 7118 | ixgbe_release_hw_control(adapter); |
9a799d71 AK |
7119 | |
7120 | iounmap(adapter->hw.hw_addr); | |
9ce77666 | 7121 | pci_release_selected_regions(pdev, pci_select_bars(pdev, |
e8e9f696 | 7122 | IORESOURCE_MEM)); |
9a799d71 | 7123 | |
849c4542 | 7124 | e_dev_info("complete\n"); |
021230d4 | 7125 | |
9a799d71 AK |
7126 | free_netdev(netdev); |
7127 | ||
19d5afd4 | 7128 | pci_disable_pcie_error_reporting(pdev); |
6fabd715 | 7129 | |
9a799d71 AK |
7130 | pci_disable_device(pdev); |
7131 | } | |
7132 | ||
7133 | /** | |
7134 | * ixgbe_io_error_detected - called when PCI error is detected | |
7135 | * @pdev: Pointer to PCI device | |
7136 | * @state: The current pci connection state | |
7137 | * | |
7138 | * This function is called after a PCI bus error affecting | |
7139 | * this device has been detected. | |
7140 | */ | |
7141 | static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |
e8e9f696 | 7142 | pci_channel_state_t state) |
9a799d71 AK |
7143 | { |
7144 | struct net_device *netdev = pci_get_drvdata(pdev); | |
454d7c9b | 7145 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
9a799d71 AK |
7146 | |
7147 | netif_device_detach(netdev); | |
7148 | ||
3044b8d1 BL |
7149 | if (state == pci_channel_io_perm_failure) |
7150 | return PCI_ERS_RESULT_DISCONNECT; | |
7151 | ||
9a799d71 AK |
7152 | if (netif_running(netdev)) |
7153 | ixgbe_down(adapter); | |
7154 | pci_disable_device(pdev); | |
7155 | ||
b4617240 | 7156 | /* Request a slot reset. */ |
9a799d71 AK |
7157 | return PCI_ERS_RESULT_NEED_RESET; |
7158 | } | |
7159 | ||
7160 | /** | |
7161 | * ixgbe_io_slot_reset - called after the pci bus has been reset. | |
7162 | * @pdev: Pointer to PCI device | |
7163 | * | |
7164 | * Restart the card from scratch, as if from a cold-boot. | |
7165 | */ | |
7166 | static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |
7167 | { | |
7168 | struct net_device *netdev = pci_get_drvdata(pdev); | |
454d7c9b | 7169 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
6fabd715 PWJ |
7170 | pci_ers_result_t result; |
7171 | int err; | |
9a799d71 | 7172 | |
9ce77666 | 7173 | if (pci_enable_device_mem(pdev)) { |
396e799c | 7174 | e_err(probe, "Cannot re-enable PCI device after reset.\n"); |
6fabd715 PWJ |
7175 | result = PCI_ERS_RESULT_DISCONNECT; |
7176 | } else { | |
7177 | pci_set_master(pdev); | |
7178 | pci_restore_state(pdev); | |
c0e1f68b | 7179 | pci_save_state(pdev); |
9a799d71 | 7180 | |
dd4d8ca6 | 7181 | pci_wake_from_d3(pdev, false); |
9a799d71 | 7182 | |
6fabd715 | 7183 | ixgbe_reset(adapter); |
88512539 | 7184 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
6fabd715 PWJ |
7185 | result = PCI_ERS_RESULT_RECOVERED; |
7186 | } | |
7187 | ||
7188 | err = pci_cleanup_aer_uncorrect_error_status(pdev); | |
7189 | if (err) { | |
849c4542 ET |
7190 | e_dev_err("pci_cleanup_aer_uncorrect_error_status " |
7191 | "failed 0x%0x\n", err); | |
6fabd715 PWJ |
7192 | /* non-fatal, continue */ |
7193 | } | |
9a799d71 | 7194 | |
6fabd715 | 7195 | return result; |
9a799d71 AK |
7196 | } |
7197 | ||
7198 | /** | |
7199 | * ixgbe_io_resume - called when traffic can start flowing again. | |
7200 | * @pdev: Pointer to PCI device | |
7201 | * | |
7202 | * This callback is called when the error recovery driver tells us that | |
7203 | * it's OK to resume normal operation. | |
7204 | */ | |
7205 | static void ixgbe_io_resume(struct pci_dev *pdev) | |
7206 | { | |
7207 | struct net_device *netdev = pci_get_drvdata(pdev); | |
454d7c9b | 7208 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
9a799d71 AK |
7209 | |
7210 | if (netif_running(netdev)) { | |
7211 | if (ixgbe_up(adapter)) { | |
396e799c | 7212 | e_info(probe, "ixgbe_up failed after reset\n"); |
9a799d71 AK |
7213 | return; |
7214 | } | |
7215 | } | |
7216 | ||
7217 | netif_device_attach(netdev); | |
9a799d71 AK |
7218 | } |
7219 | ||
7220 | static struct pci_error_handlers ixgbe_err_handler = { | |
7221 | .error_detected = ixgbe_io_error_detected, | |
7222 | .slot_reset = ixgbe_io_slot_reset, | |
7223 | .resume = ixgbe_io_resume, | |
7224 | }; | |
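Per the kernel's PCI error-recovery model, the AER core invokes these callbacks in order: error_detected() first; returning PCI_ERS_RESULT_NEED_RESET asks the core to reset the slot and then call slot_reset(); if that reports PCI_ERS_RESULT_RECOVERED, resume() is called so the driver can re-attach the interface. A minimal skeleton of the same hookup for a hypothetical driver (names are illustrative, not part of ixgbe):

static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	/* the link is gone for good: give up rather than request a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device_mem(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void example_resume(struct pci_dev *pdev)
{
	/* device is usable again: bring the interface back up here */
}

static struct pci_error_handlers example_err_handler = {
	.error_detected = example_error_detected,
	.slot_reset = example_slot_reset,
	.resume = example_resume,
};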
7225 | ||
7226 | static struct pci_driver ixgbe_driver = { | |
7227 | .name = ixgbe_driver_name, | |
7228 | .id_table = ixgbe_pci_tbl, | |
7229 | .probe = ixgbe_probe, | |
7230 | .remove = __devexit_p(ixgbe_remove), | |
7231 | #ifdef CONFIG_PM | |
7232 | .suspend = ixgbe_suspend, | |
7233 | .resume = ixgbe_resume, | |
7234 | #endif | |
7235 | .shutdown = ixgbe_shutdown, | |
7236 | .err_handler = &ixgbe_err_handler | |
7237 | }; | |
7238 | ||
7239 | /** | |
7240 | * ixgbe_init_module - Driver Registration Routine | |
7241 | * | |
7242 | * ixgbe_init_module is the first routine called when the driver is | |
7243 | * loaded. All it does is register with the PCI subsystem. | |
7244 | **/ | |
7245 | static int __init ixgbe_init_module(void) | |
7246 | { | |
7247 | int ret; | |
c7689578 | 7248 | pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); |
849c4542 | 7249 | pr_info("%s\n", ixgbe_copyright); |
9a799d71 | 7250 | |
5dd2d332 | 7251 | #ifdef CONFIG_IXGBE_DCA |
bd0362dd | 7252 | dca_register_notify(&dca_notifier); |
bd0362dd | 7253 | #endif |
5dd2d332 | 7254 | |
9a799d71 AK |
7255 | ret = pci_register_driver(&ixgbe_driver); |
7256 | return ret; | |
7257 | } | |
b4617240 | 7258 | |
9a799d71 AK |
7259 | module_init(ixgbe_init_module); |
7260 | ||
7261 | /** | |
7262 | * ixgbe_exit_module - Driver Exit Cleanup Routine | |
7263 | * | |
7264 | * ixgbe_exit_module is called just before the driver is removed | |
7265 | * from memory. | |
7266 | **/ | |
7267 | static void __exit ixgbe_exit_module(void) | |
7268 | { | |
5dd2d332 | 7269 | #ifdef CONFIG_IXGBE_DCA |
bd0362dd JC |
7270 | dca_unregister_notify(&dca_notifier); |
7271 | #endif | |
9a799d71 AK |
7272 | pci_unregister_driver(&ixgbe_driver); |
7273 | } | |
bd0362dd | 7274 | |
5dd2d332 | 7275 | #ifdef CONFIG_IXGBE_DCA |
bd0362dd | 7276 | static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, |
e8e9f696 | 7277 | void *p) |
bd0362dd JC |
7278 | { |
7279 | int ret_val; | |
7280 | ||
7281 | ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, | |
e8e9f696 | 7282 | __ixgbe_notify_dca); |
bd0362dd JC |
7283 | |
7284 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; | |
7285 | } | |
b453368d | 7286 | |
5dd2d332 | 7287 | #endif /* CONFIG_IXGBE_DCA */ |
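driver_for_each_device() above visits every device bound to ixgbe_driver and hands each one, together with the DCA event, to a helper with the int (*)(struct device *, void *) signature (the real __ixgbe_notify_dca lives elsewhere in this file). A hypothetical callback of that shape, for illustration only:

static int example_notify_dca(struct device *dev, void *data)
{
	unsigned long event = *(unsigned long *)data;
	struct net_device *netdev = dev_get_drvdata(dev);

	/* react to the DCA provider add/remove event for this device */
	dev_info(dev, "DCA event %lu on %s\n", event, netdev->name);
	return 0;
}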
849c4542 | 7288 | |
b453368d | 7289 | /** |
849c4542 | 7290 | * ixgbe_get_hw_dev - return the net_device | |
b453368d AD |
7291 | * used by the hardware layer to print debugging information | |
7292 | **/ | |
849c4542 | 7293 | struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw) |
b453368d AD |
7294 | { |
7295 | struct ixgbe_adapter *adapter = hw->back; | |
849c4542 | 7296 | return adapter->netdev; |
b453368d | 7297 | } |
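ixgbe_get_hw_dev() lets code that only holds a struct ixgbe_hw reach the net_device through hw->back, which probe points at the adapter. A hypothetical logging wrapper built on top of it (not one of the driver's actual macros) might look like:

/* hypothetical helper: prefix hardware-layer messages with the netdev name */
#define example_hw_warn(hw, fmt, ...) \
	netdev_warn(ixgbe_get_hw_dev(hw), fmt, ##__VA_ARGS__)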
bd0362dd | 7298 | |
9a799d71 AK |
7299 | module_exit(ixgbe_exit_module); |
7300 | ||
7301 | /* ixgbe_main.c */ |