Merge branch 'efi-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[deliverable/linux.git] / drivers / net / ethernet / intel / ixgbevf / vf.c
1 /*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25 *******************************************************************************/
26
27 #include "vf.h"
28 #include "ixgbevf.h"
29
/**
 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized.
 * For the VF this reduces to clearing the stopped flag; always returns 0.
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag so driver code may touch the HW again */
	hw->adapter_stopped = false;

	return 0;
}
46
47 /**
48 * ixgbevf_init_hw_vf - virtual function hardware initialization
49 * @hw: pointer to hardware structure
50 *
51 * Initialize the hardware by resetting the hardware and then starting
52 * the hardware
53 **/
54 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
55 {
56 s32 status = hw->mac.ops.start_hw(hw);
57
58 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
59
60 return status;
61 }
62
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts.  Performs the VF reset handshake with the PF over
 * the mailbox and caches the permanent MAC address plus the multicast
 * filter type from the PF's reply.
 *
 * Return: 0 on success, IXGBE_ERR_RESET_FAILED if the PF never deasserts
 * reset, IXGBE_ERR_INVALID_MAC_ADDR if the reply is neither ACK nor NACK,
 * or a mailbox read error code.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version back to the base before renegotiation */
	hw->api_version = ixgbe_mbox_api_10;

	/* trigger the VF software reset and push the write out */
	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	/* announce the reset to the PF */
	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	/* give the PF time to process the reset request */
	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* only trust the MAC address on an explicit ACK */
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	/* the multicast filter type rides in word 3 of the reply */
	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
127
128 /**
129 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
130 * @hw: pointer to hardware structure
131 *
132 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
133 * disables transmit and receive units. The adapter_stopped flag is used by
134 * the shared code and drivers to determine if the adapter is in a stopped
135 * state and should not touch the hardware.
136 **/
137 static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
138 {
139 u32 number_of_queues;
140 u32 reg_val;
141 u16 i;
142
143 /* Set the adapter_stopped flag so other driver functions stop touching
144 * the hardware
145 */
146 hw->adapter_stopped = true;
147
148 /* Disable the receive unit by stopped each queue */
149 number_of_queues = hw->mac.max_rx_queues;
150 for (i = 0; i < number_of_queues; i++) {
151 reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
152 if (reg_val & IXGBE_RXDCTL_ENABLE) {
153 reg_val &= ~IXGBE_RXDCTL_ENABLE;
154 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
155 }
156 }
157
158 IXGBE_WRITE_FLUSH(hw);
159
160 /* Clear interrupt mask to stop from interrupts being generated */
161 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
162
163 /* Clear any pending interrupts */
164 IXGBE_READ_REG(hw, IXGBE_VTEICR);
165
166 /* Disable the transmit unit. Each queue must be disabled. */
167 number_of_queues = hw->mac.max_tx_queues;
168 for (i = 0; i < number_of_queues; i++) {
169 reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
170 if (reg_val & IXGBE_TXDCTL_ENABLE) {
171 reg_val &= ~IXGBE_TXDCTL_ENABLE;
172 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
173 }
174 }
175
176 return 0;
177 }
178
179 /**
180 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
181 * @hw: pointer to hardware structure
182 * @mc_addr: the multicast address
183 *
184 * Extracts the 12 bits, from a multicast address, to determine which
185 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
186 * incoming Rx multicast addresses, to determine the bit-vector to check in
187 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
188 * by the MO field of the MCSTCTRL. The MO field is set during initialization
189 * to mc_filter_type.
190 **/
191 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
192 {
193 u32 vector = 0;
194
195 switch (hw->mac.mc_filter_type) {
196 case 0: /* use bits [47:36] of the address */
197 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
198 break;
199 case 1: /* use bits [46:35] of the address */
200 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
201 break;
202 case 2: /* use bits [45:34] of the address */
203 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
204 break;
205 case 3: /* use bits [43:32] of the address */
206 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
207 break;
208 default: /* Invalid mc_filter_type */
209 break;
210 }
211
212 /* vector can only be 12-bits or boundary will be exceeded */
213 vector &= 0xFFF;
214 return vector;
215 }
216
/**
 * ixgbevf_get_mac_addr_vf - Read device MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to storage for retrieved MAC address
 *
 * Copies the permanent MAC address cached during reset into @mac_addr.
 *
 * Return: always 0.
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}
228
229 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
230 {
231 struct ixgbe_mbx_info *mbx = &hw->mbx;
232 u32 msgbuf[3];
233 u8 *msg_addr = (u8 *)(&msgbuf[1]);
234 s32 ret_val;
235
236 memset(msgbuf, 0, sizeof(msgbuf));
237 /* If index is one then this is the start of a new list and needs
238 * indication to the PF so it can do it's own list management.
239 * If it is zero then that tells the PF to just clear all of
240 * this VF's macvlans and there is no new list.
241 */
242 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
243 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
244 if (addr)
245 ether_addr_copy(msg_addr, addr);
246 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
247
248 if (!ret_val)
249 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
250
251 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
252
253 if (!ret_val)
254 if (msgbuf[0] ==
255 (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
256 ret_val = -ENOMEM;
257
258 return ret_val;
259 }
260
261 /**
262 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
263 * @adapter: pointer to the port handle
264 * @reta: buffer to fill with RETA contents.
265 * @num_rx_queues: Number of Rx queues configured for this port
266 *
267 * The "reta" buffer should be big enough to contain 32 registers.
268 *
269 * Returns: 0 on success.
270 * if API doesn't support this operation - (-EOPNOTSUPP).
271 */
272 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
273 {
274 int err, i, j;
275 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
276 u32 *hw_reta = &msgbuf[1];
277 u32 mask = 0;
278
279 /* We have to use a mailbox for 82599 and x540 devices only.
280 * For these devices RETA has 128 entries.
281 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
282 * 16 RETA entries in each DWORD giving 2 bits to each entry.
283 */
284 int dwords = IXGBEVF_82599_RETA_SIZE / 16;
285
286 /* We support the RSS querying for 82599 and x540 devices only.
287 * Thus return an error if API doesn't support RETA querying or querying
288 * is not supported for this device type.
289 */
290 if (hw->api_version != ixgbe_mbox_api_12 ||
291 hw->mac.type >= ixgbe_mac_X550_vf)
292 return -EOPNOTSUPP;
293
294 msgbuf[0] = IXGBE_VF_GET_RETA;
295
296 err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
297
298 if (err)
299 return err;
300
301 err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
302
303 if (err)
304 return err;
305
306 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
307
308 /* If the operation has been refused by a PF return -EPERM */
309 if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
310 return -EPERM;
311
312 /* If we didn't get an ACK there must have been
313 * some sort of mailbox error so we should treat it
314 * as such.
315 */
316 if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
317 return IXGBE_ERR_MBX;
318
319 /* ixgbevf doesn't support more than 2 queues at the moment */
320 if (num_rx_queues > 1)
321 mask = 0x1;
322
323 for (i = 0; i < dwords; i++)
324 for (j = 0; j < 16; j++)
325 reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
326
327 return 0;
328 }
329
330 /**
331 * ixgbevf_get_rss_key_locked - get the RSS Random Key
332 * @hw: pointer to the HW structure
333 * @rss_key: buffer to fill with RSS Hash Key contents.
334 *
335 * The "rss_key" buffer should be big enough to contain 10 registers.
336 *
337 * Returns: 0 on success.
338 * if API doesn't support this operation - (-EOPNOTSUPP).
339 */
340 int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
341 {
342 int err;
343 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
344
345 /* We currently support the RSS Random Key retrieval for 82599 and x540
346 * devices only.
347 *
348 * Thus return an error if API doesn't support RSS Random Key retrieval
349 * or if the operation is not supported for this device type.
350 */
351 if (hw->api_version != ixgbe_mbox_api_12 ||
352 hw->mac.type >= ixgbe_mac_X550_vf)
353 return -EOPNOTSUPP;
354
355 msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
356 err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
357
358 if (err)
359 return err;
360
361 err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
362
363 if (err)
364 return err;
365
366 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
367
368 /* If the operation has been refused by a PF return -EPERM */
369 if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
370 return -EPERM;
371
372 /* If we didn't get an ACK there must have been
373 * some sort of mailbox error so we should treat it
374 * as such.
375 */
376 if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
377 return IXGBE_ERR_MBX;
378
379 memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
380
381 return 0;
382 }
383
384 /**
385 * ixgbevf_set_rar_vf - set device MAC address
386 * @hw: pointer to hardware structure
387 * @index: Receive address register to write
388 * @addr: Address to put into receive address register
389 * @vmdq: Unused in this implementation
390 **/
391 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
392 u32 vmdq)
393 {
394 struct ixgbe_mbx_info *mbx = &hw->mbx;
395 u32 msgbuf[3];
396 u8 *msg_addr = (u8 *)(&msgbuf[1]);
397 s32 ret_val;
398
399 memset(msgbuf, 0, sizeof(msgbuf));
400 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
401 ether_addr_copy(msg_addr, addr);
402 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
403
404 if (!ret_val)
405 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
406
407 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
408
409 /* if nacked the address was rejected, use "perm_addr" */
410 if (!ret_val &&
411 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
412 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
413 return IXGBE_ERR_MBX;
414 }
415
416 return ret_val;
417 }
418
419 static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
420 u32 *msg, u16 size)
421 {
422 struct ixgbe_mbx_info *mbx = &hw->mbx;
423 u32 retmsg[IXGBE_VFMAILBOX_SIZE];
424 s32 retval = mbx->ops.write_posted(hw, msg, size);
425
426 if (!retval)
427 mbx->ops.read_posted(hw, retmsg, size);
428 }
429
/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Updates the Multicast Table Array by sending the PF one 12-bit MTA
 * vector per multicast address currently configured on @netdev.
 *
 * Return: always 0.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word.  We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	/* the entry count rides in the MSG INFO field of word 0 */
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		/* NOTE(review): link-local addresses are skipped here but
		 * were still counted in cnt above, so fewer than cnt
		 * vector_list entries may actually be written — confirm
		 * whether cnt should exclude link-local addresses.
		 */
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	/* post the request and wait for the PF's acknowledgment */
	ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}
474
475 /**
476 * ixgbevf_update_xcast_mode - Update Multicast mode
477 * @hw: pointer to the HW structure
478 * @netdev: pointer to net device structure
479 * @xcast_mode: new multicast mode
480 *
481 * Updates the Multicast Mode of VF.
482 **/
483 static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
484 struct net_device *netdev, int xcast_mode)
485 {
486 struct ixgbe_mbx_info *mbx = &hw->mbx;
487 u32 msgbuf[2];
488 s32 err;
489
490 switch (hw->api_version) {
491 case ixgbe_mbox_api_12:
492 break;
493 default:
494 return -EOPNOTSUPP;
495 }
496
497 msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
498 msgbuf[1] = xcast_mode;
499
500 err = mbx->ops.write_posted(hw, msgbuf, 2);
501 if (err)
502 return err;
503
504 err = mbx->ops.read_posted(hw, msgbuf, 2);
505 if (err)
506 return err;
507
508 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
509 if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
510 return -EPERM;
511
512 return 0;
513 }
514
515 /**
516 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
517 * @hw: pointer to the HW structure
518 * @vlan: 12 bit VLAN ID
519 * @vind: unused by VF drivers
520 * @vlan_on: if true then set bit, else clear bit
521 **/
522 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
523 bool vlan_on)
524 {
525 struct ixgbe_mbx_info *mbx = &hw->mbx;
526 u32 msgbuf[2];
527 s32 err;
528
529 msgbuf[0] = IXGBE_VF_SET_VLAN;
530 msgbuf[1] = vlan;
531 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
532 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
533
534 err = mbx->ops.write_posted(hw, msgbuf, 2);
535 if (err)
536 goto mbx_err;
537
538 err = mbx->ops.read_posted(hw, msgbuf, 2);
539 if (err)
540 goto mbx_err;
541
542 /* remove extra bits from the message */
543 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
544 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
545
546 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
547 err = IXGBE_ERR_INVALID_ARGUMENT;
548
549 mbx_err:
550 return err;
551 }
552
/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: Unused in this implementation
 * @autoneg: Unused in this implementation
 * @autoneg_wait_to_complete: Unused in this implementation
 *
 * Do nothing and return success.  VF drivers are not allowed to change
 * global settings.  Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	/* intentionally a no-op stub; see function comment above */
	return 0;
}
569
/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Reads the links register to determine if link is up and the current speed,
 * then probes the mailbox to confirm the PF is still responsive before
 * declaring the link usable.
 *
 * Return: 0 normally; -1 if the PF NACKed or the mailbox previously
 * timed out (caller should reinitialize).
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* nothing to do if the link status is already known-good */
	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		/* require LINKS_UP to hold across 5 reads, 100us apart */
		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* translate the hardware speed field into the driver's enum */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
657
658 /**
659 * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
660 * @hw: pointer to the HW structure
661 * @max_size: value to assign to max frame size
662 **/
663 void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
664 {
665 u32 msgbuf[2];
666
667 msgbuf[0] = IXGBE_VF_SET_LPE;
668 msgbuf[1] = max_size;
669 ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
670 }
671
672 /**
673 * ixgbevf_negotiate_api_version - Negotiate supported API version
674 * @hw: pointer to the HW structure
675 * @api: integer containing requested API version
676 **/
677 int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
678 {
679 int err;
680 u32 msg[3];
681
682 /* Negotiate the mailbox API version */
683 msg[0] = IXGBE_VF_API_NEGOTIATE;
684 msg[1] = api;
685 msg[2] = 0;
686 err = hw->mbx.ops.write_posted(hw, msg, 3);
687
688 if (!err)
689 err = hw->mbx.ops.read_posted(hw, msg, 3);
690
691 if (!err) {
692 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
693
694 /* Store value and return 0 on success */
695 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
696 hw->api_version = api;
697 return 0;
698 }
699
700 err = IXGBE_ERR_INVALID_ARGUMENT;
701 }
702
703 return err;
704 }
705
706 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
707 unsigned int *default_tc)
708 {
709 int err;
710 u32 msg[5];
711
712 /* do nothing if API doesn't support ixgbevf_get_queues */
713 switch (hw->api_version) {
714 case ixgbe_mbox_api_11:
715 case ixgbe_mbox_api_12:
716 break;
717 default:
718 return 0;
719 }
720
721 /* Fetch queue configuration from the PF */
722 msg[0] = IXGBE_VF_GET_QUEUE;
723 msg[1] = msg[2] = msg[3] = msg[4] = 0;
724 err = hw->mbx.ops.write_posted(hw, msg, 5);
725
726 if (!err)
727 err = hw->mbx.ops.read_posted(hw, msg, 5);
728
729 if (!err) {
730 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
731
732 /* if we we didn't get an ACK there must have been
733 * some sort of mailbox error so we should treat it
734 * as such
735 */
736 if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
737 return IXGBE_ERR_MBX;
738
739 /* record and validate values from message */
740 hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
741 if (hw->mac.max_tx_queues == 0 ||
742 hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
743 hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
744
745 hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
746 if (hw->mac.max_rx_queues == 0 ||
747 hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
748 hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
749
750 *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
751 /* in case of unknown state assume we cannot tag frames */
752 if (*num_tcs > hw->mac.max_rx_queues)
753 *num_tcs = 1;
754
755 *default_tc = msg[IXGBE_VF_DEF_QUEUE];
756 /* default to queue 0 on out-of-bounds queue number */
757 if (*default_tc >= hw->mac.max_tx_queues)
758 *default_tc = 0;
759 }
760
761 return err;
762 }
763
/* MAC operations dispatch table shared by every VF MAC type below */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_check_mac_link_vf,
	.set_rar = ixgbevf_set_rar_vf,
	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode = ixgbevf_update_xcast_mode,
	.set_uc_addr = ixgbevf_set_uc_addr_vf,
	.set_vfta = ixgbevf_set_vfta_vf,
};
778
/* Per-device info blocks: each pairs a MAC type with the shared ops table */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
This page took 0.047102 seconds and 6 git commands to generate.