drivers/net/ethernet/intel/igb/e1000_mac.c
1 /* Intel(R) Gigabit Ethernet Linux driver
2 * Copyright(c) 2007-2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 */
23
24 #include <linux/if_ether.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29
30 #include "e1000_mac.h"
31
32 #include "igb.h"
33
34 static s32 igb_set_default_fc(struct e1000_hw *hw);
35 static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
36
37 /**
38 * igb_get_bus_info_pcie - Get PCIe bus information
39 * @hw: pointer to the HW structure
40 *
41 * Determines and stores the system bus information for a particular
42 * network interface. The following bus information is determined and stored:
43 * bus speed, bus width, type (PCIe), and PCIe function.
44 **/
45 s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
46 {
47 struct e1000_bus_info *bus = &hw->bus;
48 s32 ret_val;
49 u32 reg;
50 u16 pcie_link_status;
51
52 bus->type = e1000_bus_type_pci_express;
53
54 ret_val = igb_read_pcie_cap_reg(hw,
55 PCI_EXP_LNKSTA,
56 &pcie_link_status);
57 if (ret_val) {
58 bus->width = e1000_bus_width_unknown;
59 bus->speed = e1000_bus_speed_unknown;
60 } else {
61 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
62 case PCI_EXP_LNKSTA_CLS_2_5GB:
63 bus->speed = e1000_bus_speed_2500;
64 break;
65 case PCI_EXP_LNKSTA_CLS_5_0GB:
66 bus->speed = e1000_bus_speed_5000;
67 break;
68 default:
69 bus->speed = e1000_bus_speed_unknown;
70 break;
71 }
72
73 bus->width = (enum e1000_bus_width)((pcie_link_status &
74 PCI_EXP_LNKSTA_NLW) >>
75 PCI_EXP_LNKSTA_NLW_SHIFT);
76 }
77
78 reg = rd32(E1000_STATUS);
79 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
80
81 return 0;
82 }
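/* Worked example for the decode above (a sketch, assuming the standard
 * PCI_EXP_LNKSTA_* layout from <uapi/linux/pci_regs.h>, i.e. link speed in
 * bits 3:0 and negotiated link width in bits 9:4): a pcie_link_status value
 * of 0x0042 has CLS = 2 (PCI_EXP_LNKSTA_CLS_5_0GB), so bus->speed becomes
 * e1000_bus_speed_5000, and NLW = (0x0042 & 0x03f0) >> 4 = 4, so bus->width
 * reports a x4 link.
 */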
83
84 /**
85 * igb_clear_vfta - Clear VLAN filter table
86 * @hw: pointer to the HW structure
87 *
88 * Clears the register array which contains the VLAN filter table by
89 * setting all the values to 0.
90 **/
91 void igb_clear_vfta(struct e1000_hw *hw)
92 {
93 u32 offset;
94
95 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
96 array_wr32(E1000_VFTA, offset, 0);
97 wrfl();
98 }
99 }
100
101 /**
102 * igb_write_vfta - Write value to VLAN filter table
103 * @hw: pointer to the HW structure
104 * @offset: register offset in VLAN filter table
105 * @value: register value written to VLAN filter table
106 *
107 * Writes value at the given offset in the register array which stores
108 * the VLAN filter table.
109 **/
110 static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
111 {
112 array_wr32(E1000_VFTA, offset, value);
113 wrfl();
114 }
115
116 /* Due to a hw erratum, if the host tries to configure the VFTA register
117 * while performing queries from the BMC or DMA, then the VFTA in some
118 * cases won't be written.
119 */
120
121 /**
122 * igb_clear_vfta_i350 - Clear VLAN filter table
123 * @hw: pointer to the HW structure
124 *
125 * Clears the register array which contains the VLAN filter table by
126 * setting all the values to 0.
127 **/
128 void igb_clear_vfta_i350(struct e1000_hw *hw)
129 {
130 u32 offset;
131 int i;
132
133 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
134 for (i = 0; i < 10; i++)
135 array_wr32(E1000_VFTA, offset, 0);
136
137 wrfl();
138 }
139 }
140
141 /**
142 * igb_write_vfta_i350 - Write value to VLAN filter table
143 * @hw: pointer to the HW structure
144 * @offset: register offset in VLAN filter table
145 * @value: register value written to VLAN filter table
146 *
147 * Writes value at the given offset in the register array which stores
148 * the VLAN filter table.
149 **/
150 static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
151 {
152 int i;
153
154 for (i = 0; i < 10; i++)
155 array_wr32(E1000_VFTA, offset, value);
156
157 wrfl();
158 }
159
160 /**
161 * igb_init_rx_addrs - Initialize receive addresses
162 * @hw: pointer to the HW structure
163 * @rar_count: number of receive address registers
164 *
165 * Sets up the receive address registers by setting the base receive address
166 * register to the device's MAC address and clearing all the other receive
167 * address registers to 0.
168 **/
169 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
170 {
171 u32 i;
172 u8 mac_addr[ETH_ALEN] = {0};
173
174 /* Setup the receive address */
175 hw_dbg("Programming MAC Address into RAR[0]\n");
176
177 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
178
179 /* Zero out the other (rar_entry_count - 1) receive addresses */
180 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
181 for (i = 1; i < rar_count; i++)
182 hw->mac.ops.rar_set(hw, mac_addr, i);
183 }
184
185 /**
186 * igb_vfta_set - enable or disable vlan in VLAN filter table
187 * @hw: pointer to the HW structure
188 * @vid: VLAN id to add or remove
189 * @add: if true add filter, if false remove
190 *
191 * Sets or clears a bit in the VLAN filter table array based on the VLAN ID
192 * and whether we are adding or removing the filter.
193 **/
194 s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
195 {
196 u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
197 u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
198 u32 vfta;
199 struct igb_adapter *adapter = hw->back;
200 s32 ret_val = 0;
201
202 vfta = adapter->shadow_vfta[index];
203
204 /* bit was set/cleared before we started */
205 if ((!!(vfta & mask)) == add) {
206 ret_val = -E1000_ERR_CONFIG;
207 } else {
208 if (add)
209 vfta |= mask;
210 else
211 vfta &= ~mask;
212 }
213 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
214 igb_write_vfta_i350(hw, index, vfta);
215 else
216 igb_write_vfta(hw, index, vfta);
217 adapter->shadow_vfta[index] = vfta;
218
219 return ret_val;
220 }
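/* Illustrative arithmetic for the index/mask computation above (assuming the
 * usual E1000_VFTA_ENTRY_SHIFT = 5, E1000_VFTA_ENTRY_MASK = 0x7F and
 * E1000_VFTA_ENTRY_BIT_SHIFT_MASK = 0x1F from e1000_mac.h): VLAN ID 100
 * selects index = (100 >> 5) & 0x7F = 3 and mask = 1 << (100 & 0x1F) =
 * 1 << 4, i.e. bit 4 of VFTA[3].
 */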
221
222 /**
223 * igb_check_alt_mac_addr - Check for alternate MAC addr
224 * @hw: pointer to the HW structure
225 *
226 * Checks the NVM for an alternate MAC address. An alternate MAC address
227 * can be set up by pre-boot software and must be treated like a permanent
228 * address that overrides the actual permanent MAC address. If an
229 * alternate MAC address is found, it is saved in the hw struct and
230 * programmed into RAR0, and the function returns success; otherwise the
231 * function returns an error.
232 **/
233 s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
234 {
235 u32 i;
236 s32 ret_val = 0;
237 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
238 u8 alt_mac_addr[ETH_ALEN];
239
240 /* Alternate MAC address is handled by the option ROM for 82580
241 * and newer. SW support not required.
242 */
243 if (hw->mac.type >= e1000_82580)
244 goto out;
245
246 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
247 &nvm_alt_mac_addr_offset);
248 if (ret_val) {
249 hw_dbg("NVM Read Error\n");
250 goto out;
251 }
252
253 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
254 (nvm_alt_mac_addr_offset == 0x0000))
255 /* There is no Alternate MAC Address */
256 goto out;
257
258 if (hw->bus.func == E1000_FUNC_1)
259 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
260 if (hw->bus.func == E1000_FUNC_2)
261 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
262
263 if (hw->bus.func == E1000_FUNC_3)
264 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
265 for (i = 0; i < ETH_ALEN; i += 2) {
266 offset = nvm_alt_mac_addr_offset + (i >> 1);
267 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
268 if (ret_val) {
269 hw_dbg("NVM Read Error\n");
270 goto out;
271 }
272
273 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
274 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
275 }
276
277 /* if multicast bit is set, the alternate address will not be used */
278 if (is_multicast_ether_addr(alt_mac_addr)) {
279 hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
280 goto out;
281 }
282
283 /* We have a valid alternate MAC address, and we want to treat it the
284 * same as the normal permanent MAC address stored by the HW into the
285 * RAR. Do this by mapping this address into RAR0.
286 */
287 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
288
289 out:
290 return ret_val;
291 }
292
293 /**
294 * igb_rar_set - Set receive address register
295 * @hw: pointer to the HW structure
296 * @addr: pointer to the receive address
297 * @index: receive address array register
298 *
299 * Sets the receive address array register at index to the address passed
300 * in by addr.
301 **/
302 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
303 {
304 u32 rar_low, rar_high;
305
306 /* HW expects these in little endian so we reverse the byte order
307 * from network order (big endian) to little endian
308 */
309 rar_low = ((u32) addr[0] |
310 ((u32) addr[1] << 8) |
311 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
312
313 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
314
315 /* If MAC address zero, no need to set the AV bit */
316 if (rar_low || rar_high)
317 rar_high |= E1000_RAH_AV;
318
319 /* Some bridges will combine consecutive 32-bit writes into
320 * a single burst write, which will malfunction on some parts.
321 * The flushes avoid this.
322 */
323 wr32(E1000_RAL(index), rar_low);
324 wrfl();
325 wr32(E1000_RAH(index), rar_high);
326 wrfl();
327 }
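/* Worked example for the packing above, using an illustrative address
 * 00:1B:21:3A:4C:5D: rar_low = 0x00 | 0x1B << 8 | 0x21 << 16 | 0x3A << 24 =
 * 0x3A211B00 and rar_high = 0x4C | 0x5D << 8 = 0x00005D4C, which then has
 * E1000_RAH_AV OR'd in (bit 31 on this hardware, giving 0x80005D4C) to mark
 * the entry valid.
 */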
328
329 /**
330 * igb_mta_set - Set multicast filter table address
331 * @hw: pointer to the HW structure
332 * @hash_value: determines the MTA register and bit to set
333 *
334 * The multicast table address is a register array of 32-bit registers.
335 * The hash_value is used to determine what register the bit is in, the
336 * current value is read, the new bit is OR'd in and the new value is
337 * written back into the register.
338 **/
339 void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
340 {
341 u32 hash_bit, hash_reg, mta;
342
343 /* The MTA is a register array of 32-bit registers. It is
344 * treated like an array of (32*mta_reg_count) bits. We want to
345 * set bit BitArray[hash_value]. So we figure out what register
346 * the bit is in, read it, OR in the new bit, then write
347 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
348 * mask to bits 31:5 of the hash value which gives us the
349 * register we're modifying. The hash bit within that register
350 * is determined by the lower 5 bits of the hash value.
351 */
352 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
353 hash_bit = hash_value & 0x1F;
354
355 mta = array_rd32(E1000_MTA, hash_reg);
356
357 mta |= (1 << hash_bit);
358
359 array_wr32(E1000_MTA, hash_reg, mta);
360 wrfl();
361 }
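/* Example of the register/bit split above, assuming the common 128-entry MTA
 * (hw->mac.mta_reg_count = 128): a hash_value of 0x563 gives
 * hash_reg = (0x563 >> 5) & 0x7F = 43 and hash_bit = 0x563 & 0x1F = 3, so
 * bit 3 of MTA[43] is set.
 */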
362
363 /**
364 * igb_hash_mc_addr - Generate a multicast hash value
365 * @hw: pointer to the HW structure
366 * @mc_addr: pointer to a multicast address
367 *
368 * Generates a multicast address hash value which is used to determine
369 * the multicast filter table array address and new table value. See
370 * igb_mta_set()
371 **/
372 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
373 {
374 u32 hash_value, hash_mask;
375 u8 bit_shift = 0;
376
377 /* Register count multiplied by bits per register */
378 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
379
380 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
381 * where 0xFF would still fall within the hash mask.
382 */
383 while (hash_mask >> bit_shift != 0xFF)
384 bit_shift++;
385
386 /* The portion of the address that is used for the hash table
387 * is determined by the mc_filter_type setting.
388 * The algorithm is such that there is a total of 8 bits of shifting.
389 * The bit_shift for a mc_filter_type of 0 represents the number of
390 * left-shifts where the MSB of mc_addr[5] would still fall within
391 * the hash_mask. Case 0 does this exactly. Since there are a total
392 * of 8 bits of shifting, then mc_addr[4] will shift right the
393 * remaining number of bits. Thus 8 - bit_shift. The rest of the
394 * cases are a variation of this algorithm...essentially raising the
395 * number of bits to shift mc_addr[5] left, while still keeping the
396 * 8-bit shifting total.
397 *
398 * For example, given the following Destination MAC Address and an
399 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
400 * we can see that the bit_shift for case 0 is 4. These are the hash
401 * values resulting from each mc_filter_type...
402 * [0] [1] [2] [3] [4] [5]
403 * 01 AA 00 12 34 56
404 * LSB MSB
405 *
406 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
407 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
408 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
409 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
410 */
411 switch (hw->mac.mc_filter_type) {
412 default:
413 case 0:
414 break;
415 case 1:
416 bit_shift += 1;
417 break;
418 case 2:
419 bit_shift += 2;
420 break;
421 case 3:
422 bit_shift += 4;
423 break;
424 }
425
426 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
427 (((u16) mc_addr[5]) << bit_shift)));
428
429 return hash_value;
430 }
431
432 /**
433 * igb_update_mc_addr_list - Update Multicast addresses
434 * @hw: pointer to the HW structure
435 * @mc_addr_list: array of multicast addresses to program
436 * @mc_addr_count: number of multicast addresses to program
437 *
438 * Updates entire Multicast Table Array.
439 * The caller must have a packed mc_addr_list of multicast addresses.
440 **/
441 void igb_update_mc_addr_list(struct e1000_hw *hw,
442 u8 *mc_addr_list, u32 mc_addr_count)
443 {
444 u32 hash_value, hash_bit, hash_reg;
445 int i;
446
447 /* clear mta_shadow */
448 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
449
450 /* update mta_shadow from mc_addr_list */
451 for (i = 0; (u32) i < mc_addr_count; i++) {
452 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
453
454 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
455 hash_bit = hash_value & 0x1F;
456
457 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
458 mc_addr_list += (ETH_ALEN);
459 }
460
461 /* replace the entire MTA table */
462 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
463 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
464 wrfl();
465 }
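/* Typical caller-side usage (a simplified sketch of what the core driver
 * does when the netdev multicast list changes; everything here other than
 * igb_update_mc_addr_list() itself is illustrative):
 *
 *   struct netdev_hw_addr *ha;
 *   u8 *list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
 *   int n = 0;
 *
 *   if (!list)
 *           return -ENOMEM;
 *   netdev_for_each_mc_addr(ha, netdev)
 *           memcpy(list + n++ * ETH_ALEN, ha->addr, ETH_ALEN);
 *   igb_update_mc_addr_list(hw, list, n);
 *   kfree(list);
 */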
466
467 /**
468 * igb_clear_hw_cntrs_base - Clear base hardware counters
469 * @hw: pointer to the HW structure
470 *
471 * Clears the base hardware counters by reading the counter registers.
472 **/
473 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
474 {
475 rd32(E1000_CRCERRS);
476 rd32(E1000_SYMERRS);
477 rd32(E1000_MPC);
478 rd32(E1000_SCC);
479 rd32(E1000_ECOL);
480 rd32(E1000_MCC);
481 rd32(E1000_LATECOL);
482 rd32(E1000_COLC);
483 rd32(E1000_DC);
484 rd32(E1000_SEC);
485 rd32(E1000_RLEC);
486 rd32(E1000_XONRXC);
487 rd32(E1000_XONTXC);
488 rd32(E1000_XOFFRXC);
489 rd32(E1000_XOFFTXC);
490 rd32(E1000_FCRUC);
491 rd32(E1000_GPRC);
492 rd32(E1000_BPRC);
493 rd32(E1000_MPRC);
494 rd32(E1000_GPTC);
495 rd32(E1000_GORCL);
496 rd32(E1000_GORCH);
497 rd32(E1000_GOTCL);
498 rd32(E1000_GOTCH);
499 rd32(E1000_RNBC);
500 rd32(E1000_RUC);
501 rd32(E1000_RFC);
502 rd32(E1000_ROC);
503 rd32(E1000_RJC);
504 rd32(E1000_TORL);
505 rd32(E1000_TORH);
506 rd32(E1000_TOTL);
507 rd32(E1000_TOTH);
508 rd32(E1000_TPR);
509 rd32(E1000_TPT);
510 rd32(E1000_MPTC);
511 rd32(E1000_BPTC);
512 }
513
514 /**
515 * igb_check_for_copper_link - Check for link (Copper)
516 * @hw: pointer to the HW structure
517 *
518 * Checks to see if the link status of the hardware has changed. If a
519 * change in link status has been detected, then we read the PHY registers
520 * to get the current speed/duplex if link exists.
521 **/
522 s32 igb_check_for_copper_link(struct e1000_hw *hw)
523 {
524 struct e1000_mac_info *mac = &hw->mac;
525 s32 ret_val;
526 bool link;
527
528 /* We only want to go out to the PHY registers to see if Auto-Neg
529 * has completed and/or if our link status has changed. The
530 * get_link_status flag is set upon receiving a Link Status
531 * Change or Rx Sequence Error interrupt.
532 */
533 if (!mac->get_link_status) {
534 ret_val = 0;
535 goto out;
536 }
537
538 /* First we want to see if the MII Status Register reports
539 * link. If so, then we want to get the current speed/duplex
540 * of the PHY.
541 */
542 ret_val = igb_phy_has_link(hw, 1, 0, &link);
543 if (ret_val)
544 goto out;
545
546 if (!link)
547 goto out; /* No link detected */
548
549 mac->get_link_status = false;
550
551 /* Check if there was a downshift; this must be checked
552 * immediately after link-up
553 */
554 igb_check_downshift(hw);
555
556 /* If we are forcing speed/duplex, then we simply return since
557 * we have already determined whether we have link or not.
558 */
559 if (!mac->autoneg) {
560 ret_val = -E1000_ERR_CONFIG;
561 goto out;
562 }
563
564 /* Auto-Neg is enabled. Auto Speed Detection takes care
565 * of MAC speed/duplex configuration. So we only need to
566 * configure Collision Distance in the MAC.
567 */
568 igb_config_collision_dist(hw);
569
570 /* Configure Flow Control now that Auto-Neg has completed.
571 * First, we need to restore the desired flow control
572 * settings because we may have had to re-autoneg with a
573 * different link partner.
574 */
575 ret_val = igb_config_fc_after_link_up(hw);
576 if (ret_val)
577 hw_dbg("Error configuring flow control\n");
578
579 out:
580 return ret_val;
581 }
582
583 /**
584 * igb_setup_link - Setup flow control and link settings
585 * @hw: pointer to the HW structure
586 *
587 * Determines which flow control settings to use, then configures flow
588 * control. Calls the appropriate media-specific link configuration
589 * function. Assuming the adapter has a valid link partner, a valid link
590 * should be established. Assumes the hardware has previously been reset
591 * and the transmitter and receiver are not enabled.
592 **/
593 s32 igb_setup_link(struct e1000_hw *hw)
594 {
595 s32 ret_val = 0;
596
597 /* In the case of the phy reset being blocked, we already have a link.
598 * We do not need to set it up again.
599 */
600 if (igb_check_reset_block(hw))
601 goto out;
602
603 /* If requested flow control is set to default, set flow control
604 * based on the EEPROM flow control settings.
605 */
606 if (hw->fc.requested_mode == e1000_fc_default) {
607 ret_val = igb_set_default_fc(hw);
608 if (ret_val)
609 goto out;
610 }
611
612 /* We want to save off the original Flow Control configuration just
613 * in case we get disconnected and then reconnected into a different
614 * hub or switch with different Flow Control capabilities.
615 */
616 hw->fc.current_mode = hw->fc.requested_mode;
617
618 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
619
620 /* Call the necessary media_type subroutine to configure the link. */
621 ret_val = hw->mac.ops.setup_physical_interface(hw);
622 if (ret_val)
623 goto out;
624
625 /* Initialize the flow control address, type, and PAUSE timer
626 * registers to their default values. This is done even if flow
627 * control is disabled, because it does not hurt anything to
628 * initialize these registers.
629 */
630 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
631 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
632 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
633 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
634
635 wr32(E1000_FCTTV, hw->fc.pause_time);
636
637 ret_val = igb_set_fc_watermarks(hw);
638
639 out:
640
641 return ret_val;
642 }
643
644 /**
645 * igb_config_collision_dist - Configure collision distance
646 * @hw: pointer to the HW structure
647 *
648 * Configures the collision distance to the default value and is used
649 * during link setup. Currently no func pointer exists and all
650 * implementations are handled in the generic version of this function.
651 **/
652 void igb_config_collision_dist(struct e1000_hw *hw)
653 {
654 u32 tctl;
655
656 tctl = rd32(E1000_TCTL);
657
658 tctl &= ~E1000_TCTL_COLD;
659 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
660
661 wr32(E1000_TCTL, tctl);
662 wrfl();
663 }
664
665 /**
666 * igb_set_fc_watermarks - Set flow control high/low watermarks
667 * @hw: pointer to the HW structure
668 *
669 * Sets the flow control high/low threshold (watermark) registers. If
670 * flow control XON frame transmission is enabled, then set XON frame
671 * transmission as well.
672 **/
673 static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
674 {
675 s32 ret_val = 0;
676 u32 fcrtl = 0, fcrth = 0;
677
678 /* Set the flow control receive threshold registers. Normally,
679 * these registers will be set to a default threshold that may be
680 * adjusted later by the driver's runtime code. However, if the
681 * ability to transmit pause frames is not enabled, then these
682 * registers will be set to 0.
683 */
684 if (hw->fc.current_mode & e1000_fc_tx_pause) {
685 /* We need to set up the Receive Threshold high and low water
686 * marks as well as (optionally) enabling the transmission of
687 * XON frames.
688 */
689 fcrtl = hw->fc.low_water;
690 if (hw->fc.send_xon)
691 fcrtl |= E1000_FCRTL_XONE;
692
693 fcrth = hw->fc.high_water;
694 }
695 wr32(E1000_FCRTL, fcrtl);
696 wr32(E1000_FCRTH, fcrth);
697
698 return ret_val;
699 }
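/* Example of the resulting register values (assuming the usual
 * E1000_FCRTL_XONE definition of bit 31): with hw->fc.low_water = 0x1500,
 * hw->fc.high_water = 0x2000 and send_xon set, the code above writes
 * FCRTL = 0x80001500 and FCRTH = 0x2000; if Tx pause is not enabled in
 * current_mode, both registers are simply written as 0.
 */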
700
701 /**
702 * igb_set_default_fc - Set flow control default values
703 * @hw: pointer to the HW structure
704 *
705 * Read the EEPROM for the default values for flow control and store the
706 * values.
707 **/
708 static s32 igb_set_default_fc(struct e1000_hw *hw)
709 {
710 s32 ret_val = 0;
711 u16 lan_offset;
712 u16 nvm_data;
713
714 /* Read and store word 0x0F of the EEPROM. This word contains bits
715 * that determine the hardware's default PAUSE (flow control) mode,
716 * a bit that determines whether the HW defaults to enabling or
717 * disabling auto-negotiation, and the direction of the
718 * SW defined pins. If there is no SW over-ride of the flow
719 * control setting, then the variable hw->fc will
720 * be initialized based on a value in the EEPROM.
721 */
722 if (hw->mac.type == e1000_i350) {
723 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
724 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
725 + lan_offset, 1, &nvm_data);
726 } else {
727 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
728 1, &nvm_data);
729 }
730
731 if (ret_val) {
732 hw_dbg("NVM Read Error\n");
733 goto out;
734 }
735
736 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
737 hw->fc.requested_mode = e1000_fc_none;
738 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
739 NVM_WORD0F_ASM_DIR)
740 hw->fc.requested_mode = e1000_fc_tx_pause;
741 else
742 hw->fc.requested_mode = e1000_fc_full;
743
744 out:
745 return ret_val;
746 }
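/* Decoding sketch for the NVM word read above (assuming the usual
 * NVM_WORD0F_PAUSE_MASK = 0x3000 and NVM_WORD0F_ASM_DIR = 0x2000): if both
 * PAUSE bits 13:12 are clear the default is e1000_fc_none, if only the
 * ASM_DIR bit (bit 13) is set the default is e1000_fc_tx_pause, and any
 * other combination yields e1000_fc_full.
 */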
747
748 /**
749 * igb_force_mac_fc - Force the MAC's flow control settings
750 * @hw: pointer to the HW structure
751 *
752 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
753 * device control register to reflect the adapter settings. TFCE and RFCE
754 * need to be explicitly set by software when a copper PHY is used because
755 * autonegotiation is managed by the PHY rather than the MAC. Software must
756 * also configure these bits when link is forced on a fiber connection.
757 **/
758 s32 igb_force_mac_fc(struct e1000_hw *hw)
759 {
760 u32 ctrl;
761 s32 ret_val = 0;
762
763 ctrl = rd32(E1000_CTRL);
764
765 /* Because we didn't get link via the internal auto-negotiation
766 * mechanism (we either forced link or we got link via PHY
767 * auto-neg), we have to manually enable/disable transmit and
768 * receive flow control.
769 *
770 * The "Case" statement below enables/disable flow control
771 * according to the "hw->fc.current_mode" parameter.
772 *
773 * The possible values of the "fc" parameter are:
774 * 0: Flow control is completely disabled
775 * 1: Rx flow control is enabled (we can receive pause
776 * frames but not send pause frames).
777 * 2: Tx flow control is enabled (we can send pause frames
778 * but we do not receive pause frames).
779 * 3: Both Rx and Tx flow control (symmetric) are enabled.
780 * other: No other values should be possible at this point.
781 */
782 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
783
784 switch (hw->fc.current_mode) {
785 case e1000_fc_none:
786 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
787 break;
788 case e1000_fc_rx_pause:
789 ctrl &= (~E1000_CTRL_TFCE);
790 ctrl |= E1000_CTRL_RFCE;
791 break;
792 case e1000_fc_tx_pause:
793 ctrl &= (~E1000_CTRL_RFCE);
794 ctrl |= E1000_CTRL_TFCE;
795 break;
796 case e1000_fc_full:
797 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
798 break;
799 default:
800 hw_dbg("Flow control param set incorrectly\n");
801 ret_val = -E1000_ERR_CONFIG;
802 goto out;
803 }
804
805 wr32(E1000_CTRL, ctrl);
806
807 out:
808 return ret_val;
809 }
810
811 /**
812 * igb_config_fc_after_link_up - Configures flow control after link
813 * @hw: pointer to the HW structure
814 *
815 * Checks the status of auto-negotiation after link up to ensure that the
816 * speed and duplex were not forced. If the link needed to be forced, then
817 * flow control needs to be forced also. If auto-negotiation is enabled
818 * and did not fail, then we configure flow control based on our link
819 * partner.
820 **/
821 s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
822 {
823 struct e1000_mac_info *mac = &hw->mac;
824 s32 ret_val = 0;
825 u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
826 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
827 u16 speed, duplex;
828
829 /* Check for the case where we have fiber media and auto-neg failed
830 * so we had to force link. In this case, we need to force the
831 * configuration of the MAC to match the "fc" parameter.
832 */
833 if (mac->autoneg_failed) {
834 if (hw->phy.media_type == e1000_media_type_internal_serdes)
835 ret_val = igb_force_mac_fc(hw);
836 } else {
837 if (hw->phy.media_type == e1000_media_type_copper)
838 ret_val = igb_force_mac_fc(hw);
839 }
840
841 if (ret_val) {
842 hw_dbg("Error forcing flow control settings\n");
843 goto out;
844 }
845
846 /* Check for the case where we have copper media and auto-neg is
847 * enabled. In this case, we need to check and see if Auto-Neg
848 * has completed, and if so, how the PHY and link partner has
849 * flow control configured.
850 */
851 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
852 /* Read the MII Status Register and check to see if AutoNeg
853 * has completed. We read this twice because this reg has
854 * some "sticky" (latched) bits.
855 */
856 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
857 &mii_status_reg);
858 if (ret_val)
859 goto out;
860 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
861 &mii_status_reg);
862 if (ret_val)
863 goto out;
864
865 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
866 hw_dbg("Copper PHY and Auto Neg has not completed.\n");
867 goto out;
868 }
869
870 /* The AutoNeg process has completed, so we now need to
871 * read both the Auto Negotiation Advertisement
872 * Register (Address 4) and the Auto_Negotiation Base
873 * Page Ability Register (Address 5) to determine how
874 * flow control was negotiated.
875 */
876 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
877 &mii_nway_adv_reg);
878 if (ret_val)
879 goto out;
880 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
881 &mii_nway_lp_ability_reg);
882 if (ret_val)
883 goto out;
884
885 /* Two bits in the Auto Negotiation Advertisement Register
886 * (Address 4) and two bits in the Auto Negotiation Base
887 * Page Ability Register (Address 5) determine flow control
888 * for both the PHY and the link partner. The following
889 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
890 * 1999, describes these PAUSE resolution bits and how flow
891 * control is determined based upon these settings.
892 * NOTE: DC = Don't Care
893 *
894 * LOCAL DEVICE | LINK PARTNER
895 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
896 *-------|---------|-------|---------|--------------------
897 * 0 | 0 | DC | DC | e1000_fc_none
898 * 0 | 1 | 0 | DC | e1000_fc_none
899 * 0 | 1 | 1 | 0 | e1000_fc_none
900 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
901 * 1 | 0 | 0 | DC | e1000_fc_none
902 * 1 | DC | 1 | DC | e1000_fc_full
903 * 1 | 1 | 0 | 0 | e1000_fc_none
904 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
905 *
906 * Are both PAUSE bits set to 1? If so, this implies
907 * Symmetric Flow Control is enabled at both ends. The
908 * ASM_DIR bits are irrelevant per the spec.
909 *
910 * For Symmetric Flow Control:
911 *
912 * LOCAL DEVICE | LINK PARTNER
913 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
914 *-------|---------|-------|---------|--------------------
915 * 1 | DC | 1 | DC | E1000_fc_full
916 *
917 */
918 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
919 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
920 /* Now we need to check if the user selected RX ONLY
921 * of pause frames. In this case, we had to advertise
922 * FULL flow control because we could not advertise RX
923 * ONLY. Hence, we must now check to see if we need to
924 * turn OFF the TRANSMISSION of PAUSE frames.
925 */
926 if (hw->fc.requested_mode == e1000_fc_full) {
927 hw->fc.current_mode = e1000_fc_full;
928 hw_dbg("Flow Control = FULL.\n");
929 } else {
930 hw->fc.current_mode = e1000_fc_rx_pause;
931 hw_dbg("Flow Control = RX PAUSE frames only.\n");
932 }
933 }
934 /* For receiving PAUSE frames ONLY.
935 *
936 * LOCAL DEVICE | LINK PARTNER
937 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
938 *-------|---------|-------|---------|--------------------
939 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
940 */
941 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
942 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
943 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
944 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
945 hw->fc.current_mode = e1000_fc_tx_pause;
946 hw_dbg("Flow Control = TX PAUSE frames only.\n");
947 }
948 /* For transmitting PAUSE frames ONLY.
949 *
950 * LOCAL DEVICE | LINK PARTNER
951 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
952 *-------|---------|-------|---------|--------------------
953 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
954 */
955 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
956 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
957 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
958 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
959 hw->fc.current_mode = e1000_fc_rx_pause;
960 hw_dbg("Flow Control = RX PAUSE frames only.\n");
961 }
962 /* Per the IEEE spec, at this point flow control should be
963 * disabled. However, we want to consider that we could
964 * be connected to a legacy switch that doesn't advertise
965 * desired flow control, but can be forced on the link
966 * partner. So if we advertised no flow control, that is
967 * what we will resolve to. If we advertised some kind of
968 * receive capability (Rx Pause Only or Full Flow Control)
969 * and the link partner advertised none, we will configure
970 * ourselves to enable Rx Flow Control only. We can do
971 * this safely for two reasons: If the link partner really
972 * didn't want flow control enabled, and we enable Rx, no
973 * harm done since we won't be receiving any PAUSE frames
974 * anyway. If the intent on the link partner was to have
975 * flow control enabled, then by us enabling RX only, we
976 * can at least receive pause frames and process them.
977 * This is a good idea because in most cases, since we are
978 * predominantly a server NIC, more times than not we will
979 * be asked to delay transmission of packets than asking
980 * our link partner to pause transmission of frames.
981 */
982 else if ((hw->fc.requested_mode == e1000_fc_none) ||
983 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
984 (hw->fc.strict_ieee)) {
985 hw->fc.current_mode = e1000_fc_none;
986 hw_dbg("Flow Control = NONE.\n");
987 } else {
988 hw->fc.current_mode = e1000_fc_rx_pause;
989 hw_dbg("Flow Control = RX PAUSE frames only.\n");
990 }
991
992 /* Now we need to do one last check... If we auto-
993 * negotiated to HALF DUPLEX, flow control should not be
994 * enabled per IEEE 802.3 spec.
995 */
996 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
997 if (ret_val) {
998 hw_dbg("Error getting link speed and duplex\n");
999 goto out;
1000 }
1001
1002 if (duplex == HALF_DUPLEX)
1003 hw->fc.current_mode = e1000_fc_none;
1004
1005 /* Now we call a subroutine to actually force the MAC
1006 * controller to use the correct flow control settings.
1007 */
1008 ret_val = igb_force_mac_fc(hw);
1009 if (ret_val) {
1010 hw_dbg("Error forcing flow control settings\n");
1011 goto out;
1012 }
1013 }
1014 /* Check for the case where we have SerDes media and auto-neg is
1015 * enabled. In this case, we need to check and see if Auto-Neg
1016 * has completed, and if so, how the PHY and link partner has
1017 * flow control configured.
1018 */
1019 if ((hw->phy.media_type == e1000_media_type_internal_serdes)
1020 && mac->autoneg) {
1021 /* Read the PCS_LSTS and check to see if AutoNeg
1022 * has completed.
1023 */
1024 pcs_status_reg = rd32(E1000_PCS_LSTAT);
1025
1026 if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
1027 hw_dbg("PCS Auto Neg has not completed.\n");
1028 return ret_val;
1029 }
1030
1031 /* The AutoNeg process has completed, so we now need to
1032 * read both the Auto Negotiation Advertisement
1033 * Register (PCS_ANADV) and the Auto_Negotiation Base
1034 * Page Ability Register (PCS_LPAB) to determine how
1035 * flow control was negotiated.
1036 */
1037 pcs_adv_reg = rd32(E1000_PCS_ANADV);
1038 pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
1039
1040 /* Two bits in the Auto Negotiation Advertisement Register
1041 * (PCS_ANADV) and two bits in the Auto Negotiation Base
1042 * Page Ability Register (PCS_LPAB) determine flow control
1043 * for both the PHY and the link partner. The following
1044 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1045 * 1999, describes these PAUSE resolution bits and how flow
1046 * control is determined based upon these settings.
1047 * NOTE: DC = Don't Care
1048 *
1049 * LOCAL DEVICE | LINK PARTNER
1050 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1051 *-------|---------|-------|---------|--------------------
1052 * 0 | 0 | DC | DC | e1000_fc_none
1053 * 0 | 1 | 0 | DC | e1000_fc_none
1054 * 0 | 1 | 1 | 0 | e1000_fc_none
1055 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1056 * 1 | 0 | 0 | DC | e1000_fc_none
1057 * 1 | DC | 1 | DC | e1000_fc_full
1058 * 1 | 1 | 0 | 0 | e1000_fc_none
1059 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1060 *
1061 * Are both PAUSE bits set to 1? If so, this implies
1062 * Symmetric Flow Control is enabled at both ends. The
1063 * ASM_DIR bits are irrelevant per the spec.
1064 *
1065 * For Symmetric Flow Control:
1066 *
1067 * LOCAL DEVICE | LINK PARTNER
1068 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1069 *-------|---------|-------|---------|--------------------
1070 * 1 | DC | 1 | DC | e1000_fc_full
1071 *
1072 */
1073 if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1074 (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
1075 /* Now we need to check if the user selected Rx ONLY
1076 * of pause frames. In this case, we had to advertise
1077 * FULL flow control because we could not advertise Rx
1078 * ONLY. Hence, we must now check to see if we need to
1079 * turn OFF the TRANSMISSION of PAUSE frames.
1080 */
1081 if (hw->fc.requested_mode == e1000_fc_full) {
1082 hw->fc.current_mode = e1000_fc_full;
1083 hw_dbg("Flow Control = FULL.\n");
1084 } else {
1085 hw->fc.current_mode = e1000_fc_rx_pause;
1086 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1087 }
1088 }
1089 /* For receiving PAUSE frames ONLY.
1090 *
1091 * LOCAL DEVICE | LINK PARTNER
1092 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1093 *-------|---------|-------|---------|--------------------
1094 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1095 */
1096 else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
1097 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1098 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1099 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1100 hw->fc.current_mode = e1000_fc_tx_pause;
1101 hw_dbg("Flow Control = Tx PAUSE frames only.\n");
1102 }
1103 /* For transmitting PAUSE frames ONLY.
1104 *
1105 * LOCAL DEVICE | LINK PARTNER
1106 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1107 *-------|---------|-------|---------|--------------------
1108 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1109 */
1110 else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1111 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1112 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1113 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1114 hw->fc.current_mode = e1000_fc_rx_pause;
1115 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1116 } else {
1117 /* Per the IEEE spec, at this point flow control
1118 * should be disabled.
1119 */
1120 hw->fc.current_mode = e1000_fc_none;
1121 hw_dbg("Flow Control = NONE.\n");
1122 }
1123
1124 /* Now we call a subroutine to actually force the MAC
1125 * controller to use the correct flow control settings.
1126 */
1127 pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
1128 pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1129 wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
1130
1131 ret_val = igb_force_mac_fc(hw);
1132 if (ret_val) {
1133 hw_dbg("Error forcing flow control settings\n");
1134 return ret_val;
1135 }
1136 }
1137
1138 out:
1139 return ret_val;
1140 }
1141
1142 /**
1143 * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
1144 * @hw: pointer to the HW structure
1145 * @speed: stores the current speed
1146 * @duplex: stores the current duplex
1147 *
1148 * Read the status register for the current speed/duplex and store the current
1149 * speed and duplex for copper connections.
1150 **/
1151 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1152 u16 *duplex)
1153 {
1154 u32 status;
1155
1156 status = rd32(E1000_STATUS);
1157 if (status & E1000_STATUS_SPEED_1000) {
1158 *speed = SPEED_1000;
1159 hw_dbg("1000 Mbs, ");
1160 } else if (status & E1000_STATUS_SPEED_100) {
1161 *speed = SPEED_100;
1162 hw_dbg("100 Mbs, ");
1163 } else {
1164 *speed = SPEED_10;
1165 hw_dbg("10 Mbs, ");
1166 }
1167
1168 if (status & E1000_STATUS_FD) {
1169 *duplex = FULL_DUPLEX;
1170 hw_dbg("Full Duplex\n");
1171 } else {
1172 *duplex = HALF_DUPLEX;
1173 hw_dbg("Half Duplex\n");
1174 }
1175
1176 return 0;
1177 }
1178
1179 /**
1180 * igb_get_hw_semaphore - Acquire hardware semaphore
1181 * @hw: pointer to the HW structure
1182 *
1183 * Acquire the HW semaphore to access the PHY or NVM
1184 **/
1185 s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1186 {
1187 u32 swsm;
1188 s32 ret_val = 0;
1189 s32 timeout = hw->nvm.word_size + 1;
1190 s32 i = 0;
1191
1192 /* Get the SW semaphore */
1193 while (i < timeout) {
1194 swsm = rd32(E1000_SWSM);
1195 if (!(swsm & E1000_SWSM_SMBI))
1196 break;
1197
1198 udelay(50);
1199 i++;
1200 }
1201
1202 if (i == timeout) {
1203 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1204 ret_val = -E1000_ERR_NVM;
1205 goto out;
1206 }
1207
1208 /* Get the FW semaphore. */
1209 for (i = 0; i < timeout; i++) {
1210 swsm = rd32(E1000_SWSM);
1211 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
1212
1213 /* Semaphore acquired if bit latched */
1214 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
1215 break;
1216
1217 udelay(50);
1218 }
1219
1220 if (i == timeout) {
1221 /* Release semaphores */
1222 igb_put_hw_semaphore(hw);
1223 hw_dbg("Driver can't access the NVM\n");
1224 ret_val = -E1000_ERR_NVM;
1225 goto out;
1226 }
1227
1228 out:
1229 return ret_val;
1230 }
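/* Callers are expected to pair this with igb_put_hw_semaphore(); a minimal
 * usage sketch (error handling trimmed, surrounding code illustrative):
 *
 *   if (igb_get_hw_semaphore(hw))
 *           return -E1000_ERR_NVM;
 *   ... access the NVM or PHY registers ...
 *   igb_put_hw_semaphore(hw);
 */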
1231
1232 /**
1233 * igb_put_hw_semaphore - Release hardware semaphore
1234 * @hw: pointer to the HW structure
1235 *
1236 * Release hardware semaphore used to access the PHY or NVM
1237 **/
1238 void igb_put_hw_semaphore(struct e1000_hw *hw)
1239 {
1240 u32 swsm;
1241
1242 swsm = rd32(E1000_SWSM);
1243
1244 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1245
1246 wr32(E1000_SWSM, swsm);
1247 }
1248
1249 /**
1250 * igb_get_auto_rd_done - Check for auto read completion
1251 * @hw: pointer to the HW structure
1252 *
1253 * Check EEPROM for Auto Read done bit.
1254 **/
1255 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1256 {
1257 s32 i = 0;
1258 s32 ret_val = 0;
1259
1260
1261 while (i < AUTO_READ_DONE_TIMEOUT) {
1262 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1263 break;
1264 usleep_range(1000, 2000);
1265 i++;
1266 }
1267
1268 if (i == AUTO_READ_DONE_TIMEOUT) {
1269 hw_dbg("Auto read by HW from NVM has not completed.\n");
1270 ret_val = -E1000_ERR_RESET;
1271 goto out;
1272 }
1273
1274 out:
1275 return ret_val;
1276 }
1277
1278 /**
1279 * igb_valid_led_default - Verify a valid default LED config
1280 * @hw: pointer to the HW structure
1281 * @data: pointer to storage for the LED configuration read from the NVM
1282 *
1283 * Read the EEPROM for the current default LED configuration. If the
1284 * LED configuration is not valid, set to a valid LED configuration.
1285 **/
1286 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1287 {
1288 s32 ret_val;
1289
1290 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1291 if (ret_val) {
1292 hw_dbg("NVM Read Error\n");
1293 goto out;
1294 }
1295
1296 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1297 switch (hw->phy.media_type) {
1298 case e1000_media_type_internal_serdes:
1299 *data = ID_LED_DEFAULT_82575_SERDES;
1300 break;
1301 case e1000_media_type_copper:
1302 default:
1303 *data = ID_LED_DEFAULT;
1304 break;
1305 }
1306 }
1307 out:
1308 return ret_val;
1309 }
1310
1311 /**
1312 * igb_id_led_init - Initialize ID LED settings from the NVM
1313 * @hw: pointer to the HW structure
1314 *
1315 **/
1316 s32 igb_id_led_init(struct e1000_hw *hw)
1317 {
1318 struct e1000_mac_info *mac = &hw->mac;
1319 s32 ret_val;
1320 const u32 ledctl_mask = 0x000000FF;
1321 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1322 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1323 u16 data, i, temp;
1324 const u16 led_mask = 0x0F;
1325
1326 /* i210 and i211 devices have a different LED mechanism */
1327 if ((hw->mac.type == e1000_i210) ||
1328 (hw->mac.type == e1000_i211))
1329 ret_val = igb_valid_led_default_i210(hw, &data);
1330 else
1331 ret_val = igb_valid_led_default(hw, &data);
1332
1333 if (ret_val)
1334 goto out;
1335
1336 mac->ledctl_default = rd32(E1000_LEDCTL);
1337 mac->ledctl_mode1 = mac->ledctl_default;
1338 mac->ledctl_mode2 = mac->ledctl_default;
1339
1340 for (i = 0; i < 4; i++) {
1341 temp = (data >> (i << 2)) & led_mask;
1342 switch (temp) {
1343 case ID_LED_ON1_DEF2:
1344 case ID_LED_ON1_ON2:
1345 case ID_LED_ON1_OFF2:
1346 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1347 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1348 break;
1349 case ID_LED_OFF1_DEF2:
1350 case ID_LED_OFF1_ON2:
1351 case ID_LED_OFF1_OFF2:
1352 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1353 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1354 break;
1355 default:
1356 /* Do nothing */
1357 break;
1358 }
1359 switch (temp) {
1360 case ID_LED_DEF1_ON2:
1361 case ID_LED_ON1_ON2:
1362 case ID_LED_OFF1_ON2:
1363 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1364 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1365 break;
1366 case ID_LED_DEF1_OFF2:
1367 case ID_LED_ON1_OFF2:
1368 case ID_LED_OFF1_OFF2:
1369 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1370 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1371 break;
1372 default:
1373 /* Do nothing */
1374 break;
1375 }
1376 }
1377
1378 out:
1379 return ret_val;
1380 }
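/* Arithmetic note for the loop above: each 4-bit field of the NVM ID LED
 * word configures one LED, and each LED owns an 8-bit field of LEDCTL. For
 * i = 2, for example, (i << 2) = 8 selects bits 11:8 of the NVM word while
 * (i << 3) = 16 positions the replacement mode in bits 23:16 of
 * ledctl_mode1/ledctl_mode2.
 */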
1381
1382 /**
1383 * igb_cleanup_led - Set LED config to default operation
1384 * @hw: pointer to the HW structure
1385 *
1386 * Remove the current LED configuration and set the LED configuration
1387 * to the default value, saved from the EEPROM.
1388 **/
1389 s32 igb_cleanup_led(struct e1000_hw *hw)
1390 {
1391 wr32(E1000_LEDCTL, hw->mac.ledctl_default);
1392 return 0;
1393 }
1394
1395 /**
1396 * igb_blink_led - Blink LED
1397 * @hw: pointer to the HW structure
1398 *
1399 * Blink the LEDs which are set to be on.
1400 **/
1401 s32 igb_blink_led(struct e1000_hw *hw)
1402 {
1403 u32 ledctl_blink = 0;
1404 u32 i;
1405
1406 if (hw->phy.media_type == e1000_media_type_fiber) {
1407 /* always blink LED0 for PCI-E fiber */
1408 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1409 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1410 } else {
1411 /* Set the blink bit for each LED that's "on" (0x0E)
1412 * (or "off" if inverted) in ledctl_mode2. The blink
1413 * logic in hardware only works when mode is set to "on"
1414 * so it must be changed accordingly when the mode is
1415 * "off" and inverted.
1416 */
1417 ledctl_blink = hw->mac.ledctl_mode2;
1418 for (i = 0; i < 32; i += 8) {
1419 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1420 E1000_LEDCTL_LED0_MODE_MASK;
1421 u32 led_default = hw->mac.ledctl_default >> i;
1422
1423 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1424 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1425 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1426 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1427 ledctl_blink &=
1428 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1429 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1430 E1000_LEDCTL_MODE_LED_ON) << i;
1431 }
1432 }
1433 }
1434
1435 wr32(E1000_LEDCTL, ledctl_blink);
1436
1437 return 0;
1438 }
1439
1440 /**
1441 * igb_led_off - Turn LED off
1442 * @hw: pointer to the HW structure
1443 *
1444 * Turn LED off.
1445 **/
1446 s32 igb_led_off(struct e1000_hw *hw)
1447 {
1448 switch (hw->phy.media_type) {
1449 case e1000_media_type_copper:
1450 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1451 break;
1452 default:
1453 break;
1454 }
1455
1456 return 0;
1457 }
1458
1459 /**
1460 * igb_disable_pcie_master - Disables PCI-express master access
1461 * @hw: pointer to the HW structure
1462 *
1463 * Returns 0 if successful, else returns -10
1464 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
1465 * caused the master requests to be disabled.
1466 *
1467 * Disables PCI-Express master access and verifies there are no pending
1468 * requests.
1469 **/
1470 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1471 {
1472 u32 ctrl;
1473 s32 timeout = MASTER_DISABLE_TIMEOUT;
1474 s32 ret_val = 0;
1475
1476 if (hw->bus.type != e1000_bus_type_pci_express)
1477 goto out;
1478
1479 ctrl = rd32(E1000_CTRL);
1480 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1481 wr32(E1000_CTRL, ctrl);
1482
1483 while (timeout) {
1484 if (!(rd32(E1000_STATUS) &
1485 E1000_STATUS_GIO_MASTER_ENABLE))
1486 break;
1487 udelay(100);
1488 timeout--;
1489 }
1490
1491 if (!timeout) {
1492 hw_dbg("Master requests are pending.\n");
1493 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1494 goto out;
1495 }
1496
1497 out:
1498 return ret_val;
1499 }
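/* Rough timing of the poll loop above (assuming the usual
 * MASTER_DISABLE_TIMEOUT of 800 iterations): with a 100 usec delay per
 * iteration, the function waits up to roughly 80 ms for the GIO master
 * enable status bit to clear before giving up.
 */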
1500
1501 /**
1502 * igb_validate_mdi_setting - Verify MDI/MDIx settings
1503 * @hw: pointer to the HW structure
1504 *
1505 * Verify that when auto-negotiation is not used, MDI/MDIx is correctly
1506 * set; an invalid setting is forced to MDI mode.
1507 **/
1508 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1509 {
1510 s32 ret_val = 0;
1511
1512 /* All MDI settings are supported on 82580 and newer. */
1513 if (hw->mac.type >= e1000_82580)
1514 goto out;
1515
1516 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1517 hw_dbg("Invalid MDI setting detected\n");
1518 hw->phy.mdix = 1;
1519 ret_val = -E1000_ERR_CONFIG;
1520 goto out;
1521 }
1522
1523 out:
1524 return ret_val;
1525 }
1526
1527 /**
1528 * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
1529 * @hw: pointer to the HW structure
1530 * @reg: 32bit register offset such as E1000_SCTL
1531 * @offset: register offset to write to
1532 * @data: data to write at register offset
1533 *
1534 * Writes an address/data control type register. There are several of these
1535 * and they all have the format address << 8 | data and bit 31 is polled for
1536 * completion.
1537 **/
1538 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1539 u32 offset, u8 data)
1540 {
1541 u32 i, regvalue = 0;
1542 s32 ret_val = 0;
1543
1544 /* Set up the address and data */
1545 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1546 wr32(reg, regvalue);
1547
1548 /* Poll the ready bit to see if the write completed */
1549 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1550 udelay(5);
1551 regvalue = rd32(reg);
1552 if (regvalue & E1000_GEN_CTL_READY)
1553 break;
1554 }
1555 if (!(regvalue & E1000_GEN_CTL_READY)) {
1556 hw_dbg("Reg %08x did not indicate ready\n", reg);
1557 ret_val = -E1000_ERR_PHY;
1558 goto out;
1559 }
1560
1561 out:
1562 return ret_val;
1563 }
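/* Example of the encoding above: per the "address << 8 | data" format noted
 * in the kernel-doc, writing data 0x05 at offset 0x02 programs the register
 * with 0x00000205, after which bit 31 (E1000_GEN_CTL_READY) is polled until
 * the hardware reports completion.
 */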
1564
1565 /**
1566 * igb_enable_mng_pass_thru - Enable processing of ARPs
1567 * @hw: pointer to the HW structure
1568 *
1569 * Verifies that the hardware needs to leave the interface enabled so that
1570 * frames can be directed to and from the management interface.
1571 **/
1572 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1573 {
1574 u32 manc;
1575 u32 fwsm, factps;
1576 bool ret_val = false;
1577
1578 if (!hw->mac.asf_firmware_present)
1579 goto out;
1580
1581 manc = rd32(E1000_MANC);
1582
1583 if (!(manc & E1000_MANC_RCV_TCO_EN))
1584 goto out;
1585
1586 if (hw->mac.arc_subsystem_valid) {
1587 fwsm = rd32(E1000_FWSM);
1588 factps = rd32(E1000_FACTPS);
1589
1590 if (!(factps & E1000_FACTPS_MNGCG) &&
1591 ((fwsm & E1000_FWSM_MODE_MASK) ==
1592 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1593 ret_val = true;
1594 goto out;
1595 }
1596 } else {
1597 if ((manc & E1000_MANC_SMBUS_EN) &&
1598 !(manc & E1000_MANC_ASF_EN)) {
1599 ret_val = true;
1600 goto out;
1601 }
1602 }
1603
1604 out:
1605 return ret_val;
1606 }