/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                         */
/*  Santiago Leon (santi_leon@yahoo.com)                                 */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                          */
/*  John Allen (jallen@linux.vnet.ibm.com)                               */
/*                                                                       */
/*  This program is free software; you can redistribute it and/or modify */
/*  it under the terms of the GNU General Public License as published by */
/*  the Free Software Foundation; either version 2 of the License, or    */
/*  (at your option) any later version.                                  */
/*                                                                       */
/*  This program is distributed in the hope that it will be useful,      */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/*  GNU General Public License for more details.                         */
/*                                                                       */
/*  You should have received a copy of the GNU General Public License    */
/*  along with this program.                                             */
/*                                                                       */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                       */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the       */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to      */
/* CRQs, but are used by the driver to notify the server that a packet   */
/* is ready for transmission or that a buffer has been added to receive  */
/* a packet. Subsequently, sCRQs are used by the server to notify the    */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                       */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, continuous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                       */
/**************************************************************************/
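
/* Illustrative sketch (not part of the driver): the long-term-mapping
 * strategy above boils down to a map-once/reuse-forever pattern.  The
 * real helpers in this file are alloc_long_term_buff(),
 * free_long_term_buff(), send_request_map() and send_request_unmap();
 * the snippet below only summarizes how they cooperate:
 *
 *	// at init: one large coherent buffer, registered with the server
 *	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr, GFP_KERNEL);
 *	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 *
 *	// per packet: copy the skb into a slot of the mapped buffer and
 *	// hand the server an offset into the already-mapped region
 *	memcpy(ltb->buff + index * buff_size, skb->data, skb->len);
 *
 *	// at teardown only: unregister and free the mapping
 *	send_request_unmap(adapter, ltb->map_id);
 *	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 */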
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
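
/* Example (illustrative only): for the "rx_packets" entry below,
 * IBMVNIC_STAT_OFF(rx_packets) is the byte offset of
 * adapter->stats.rx_packets within struct ibmvnic_adapter, so
 * IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets)) dereferences
 * that field as a u64.
 */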
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	/* init the completion before sending the map request so the
	 * response cannot race the wait below
	 */
	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	send_request_unmap(adapter, ltb->map_id);
}
static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);
	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
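
		/* The correlator is opaque to the server: it is echoed back
		 * verbatim in the rx completion, where ibmvnic_poll() casts
		 * it straight back to the ibmvnic_rx_buff posted here.
		 */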
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * discarded.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
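
		/* Worked example (illustrative): with buff_size = 0x1000,
		 * cpu_to_be32(0x1000 << 8) stores bytes 00 10 00 00, so the
		 * 24-bit big-endian len field reads 0x001000 as intended;
		 * without the shift it would read 0x000010 instead.
		 */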
		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}
	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);

	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}
	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);

	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}
	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_start_queue(netdev);
	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	netif_stop_queue(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and returns the total buffer length, both of which are used
 * to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len = hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
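
/* Illustrative note: bits 6, 5 and 4 of hdr_field select the L2, L3 and
 * L4 headers respectively, so e.g. hdr_field = 0x70 requests all three.
 * For a TCP/IPv4 frame that yields hdr_len = {14, 20, tcp_hdrlen} and a
 * return value equal to their sum.
 */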
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			/* extension descriptors carry up to 29 bytes */
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			/* the first descriptor carries up to 24 bytes */
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer holding the skb and the descriptor array to fill
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
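
/* Worked example (illustrative): a 54-byte TCP/IPv4 header block
 * (14 + 20 + 20) uses one 24-byte hdr descriptor plus ceil(30 / 29) = 2
 * extension descriptors, so *num_entries grows by 2 above and
 * create_hdr_descs() emits 3 descriptors in total.
 */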
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		/* undo the consumer index advance on failure */
		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
		return -EINVAL;

	netdev->mtu = new_mtu;
	return 0;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		/* re-check for work that raced with the irq re-enable */
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};
/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* init the completion before sending so the response cannot race
	 * the wait, then wait for data to be written
	 */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_settings		= ibmvnic_get_settings,
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
};
/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
	if (scrq->irq == NO_IRQ) {
		dev_err(dev, "Error mapping irq\n");
		goto map_irq_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

map_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						   producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i, j;
	int rc;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
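
		/* e.g. with 4 KB pages (illustrative arithmetic):
		 * (4 * 4096) / 32 = 512 entries per four-page queue.
		 */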
		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		/* Choosing the maximum number of queues supported by firmware*/
		adapter->req_tx_queues = adapter->max_tx_queues;
		adapter->req_rx_queues = adapter->max_rx_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", adapter->tx_scrq[i]);
		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				adapter->tx_scrq[i]->irq, rc);
			goto req_tx_irq_failed;
		}
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", adapter->rx_scrq[i]);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				adapter->rx_scrq[i]->irq, rc);
			goto req_rx_irq_failed;
		}
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

req_rx_irq_failed:
	for (j = 0; j < i; j++)
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++)
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
	kfree(adapter->rx_scrq);
	adapter->rx_scrq = NULL;
rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);
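
	/* Login buffer layout (illustrative):
	 *
	 *   +-----------------------------+  offset 0
	 *   | struct ibmvnic_login_buffer |
	 *   +-----------------------------+  off_txcomp_subcrqs
	 *   | req_tx_queues u64 crq_nums  |  <- tx_list_p
	 *   +-----------------------------+  off_rxcomp_subcrqs
	 *   | req_rx_queues u64 crq_nums  |  <- rx_list_p
	 *   +-----------------------------+
	 */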
	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_queries, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);
}
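/* The server's QUERY_IP_OFFLOAD response describes which checksum and
 * large-send offloads it supports. The handler below mirrors the supported
 * checksum bits into a CONTROL_IP_OFFLOAD request and into
 * netdev->features.
 */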
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			crq->request_error_rsp.error_id);
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		crq->request_error_rsp.error_id);

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
static void handle_dump_size_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;
	unsigned long flags;

	/* allocate and map buffer */
	adapter->dump_data = kmalloc(len, GFP_KERNEL);
	if (!adapter->dump_data) {
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
						  DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->dump_data_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map dump data\n");
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, adapter->dump_data_token, len,
				 DMA_FROM_DEVICE);
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_size = len;

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
	newcrq.request_dump.cmd = REQUEST_DUMP;
	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);

	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &newcrq);
}
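/* Error reporting is a two step exchange: the server sends an
 * ERROR_INDICATION carrying an error id and the size of the detail data;
 * the driver maps a buffer of that size and asks for the details with
 * REQUEST_ERROR_INFO. handle_error_info_rsp() above consumes the reply.
 */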
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.
		    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		crq->error_indication.error_id,
		crq->error_indication.error_cause);

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	/* record the request we are sending, not a copy of the pointer */
	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
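/* A REQUEST_CAPABILITY response of PARTIALSUCCESS means the server could
 * not grant the requested value; the handler adopts the server's counter
 * value and retries. Once all seven requested capabilities are acked, it
 * moves on to the IP offload query.
 */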
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be32_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
		complete(&adapter->init_done);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (++adapter->requested_caps == 7) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
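/* Validate the login exchange: a non-zero return code flags renegotiation
 * (fewer queues), and the txsubm/rxadd sub-CRQ counts in the response must
 * be consistent with what the login buffer asked for.
 */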
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	memset(&crq, 0, sizeof(crq));
	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
	ibmvnic_send_crq(adapter, &crq);

	return 0;
}
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
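/* Record each queried capability (min/max queue counts, entries per
 * sub-CRQ, MTU bounds, offload and SG support) in the adapter structure.
 * These bounds feed the later REQUEST_CAPABILITY negotiation.
 */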
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_queries);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_queries));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	/* We're done querying the capabilities, initialize sub-crqs */
	if (atomic_read(&adapter->running_cap_queries) == 0)
		complete(&adapter->init_done);
}
static void handle_control_ras_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	u8 correlator = crq->control_ras_rsp.correlator;
	struct device *dev = &adapter->vdev->dev;
	bool found = false;
	int i;

	if (crq->control_ras_rsp.rc.code) {
		dev_warn(dev, "Control ras failed rc=%d\n",
			 crq->control_ras_rsp.rc.code);
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		if (adapter->ras_comps[i].correlator == correlator) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
		return;
	}

	switch (crq->control_ras_rsp.op) {
	case IBMVNIC_TRACE_LEVEL:
		adapter->ras_comps[i].trace_level = crq->control_ras.level;
		break;
	case IBMVNIC_ERROR_LEVEL:
		adapter->ras_comps[i].error_check_level =
		    crq->control_ras.level;
		break;
	case IBMVNIC_TRACE_PAUSE:
		adapter->ras_comp_int[i].paused = 1;
		break;
	case IBMVNIC_TRACE_RESUME:
		adapter->ras_comp_int[i].paused = 0;
		break;
	case IBMVNIC_TRACE_ON:
		adapter->ras_comps[i].trace_on = 1;
		break;
	case IBMVNIC_TRACE_OFF:
		adapter->ras_comps[i].trace_on = 0;
		break;
	case IBMVNIC_CHG_TRACE_BUFF_SZ:
		/* trace_buff_sz is 3 bytes, stuff it into an int */
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
		    crq->control_ras_rsp.trace_buff_sz[0];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
		    crq->control_ras_rsp.trace_buff_sz[1];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
		    crq->control_ras_rsp.trace_buff_sz[2];
		break;
	default:
		dev_err(dev, "invalid op %d on control_ras_rsp",
			crq->control_ras_rsp.op);
	}
}
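/* What follows is the debugfs interface for the firmware RAS components:
 * per-component "trace", "paused", "tracing", "error_level", "trace_level"
 * and "trace_buf_size" files, created in handle_request_ras_comps_rsp().
 * Each file is backed by a CONTROL_RAS or COLLECT_FW_TRACE CRQ exchange.
 */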
static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
			  loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_fw_trace_entry *trace;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long fault;
	dma_addr_t trace_tok;

	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		return 0;

	trace =
	    dma_alloc_coherent(dev,
			       be32_to_cpu(adapter->ras_comps[num].
					   trace_buff_size), &trace_tok,
			       GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "Couldn't alloc trace buffer\n");
		return 0;
	}

	memset(&crq, 0, sizeof(crq));
	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		len =
		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
		    *ppos;

	fault = copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);

	dma_free_coherent(dev,
			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
			  trace, trace_tok);
	if (fault)
		return -EFAULT;

	*ppos += len;

	return len;
}

static const struct file_operations trace_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_read,
};
static ssize_t paused_read(struct file *file, char __user *user_buf,
			   size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /*  1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;

	return size;
}

static ssize_t paused_write(struct file *file, const char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	adapter->ras_comp_int[num].paused = val ? 1 : 0;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations paused_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= paused_read,
	.write		= paused_write,
};
static ssize_t tracing_read(struct file *file, char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /*  1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;

	return size;
}

static ssize_t tracing_write(struct file *file, const char __user *user_buf,
			     size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations tracing_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= tracing_read,
	.write		= tracing_write,
};
static ssize_t error_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;

	return size;
}

static ssize_t error_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations error_level_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= error_level_read,
	.write		= error_level_write,
};
static ssize_t trace_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;

	return size;
}

static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_level_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_level_read,
	.write		= trace_level_write,
};
static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
				    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[9]; /* decimal max int plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;

	return size;
}

static ssize_t trace_buff_size_write(struct file *file,
				     const char __user *user_buf, size_t len,
				     loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */
	size_t count = min(len, sizeof(buff) - 1);

	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes, stuff an int into it; on big-endian
	 * ppc64 the low 24 bits of the unsigned long live in bytes 5..7
	 */
	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_size_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_buff_size_read,
	.write		= trace_buff_size_write,
};
static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
					 struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct dentry *dir_ent;
	struct dentry *ent;
	int i;

	debugfs_remove_recursive(adapter->ras_comps_ent);

	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
						    adapter->debugfs_dir);
	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
		dev_info(dev, "debugfs create ras_comps dir failed\n");
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
					     adapter->ras_comps_ent);
		if (!dir_ent || IS_ERR(dir_ent)) {
			dev_info(dev, "debugfs create %s dir failed\n",
				 adapter->ras_comps[i].name);
			continue;
		}

		adapter->ras_comp_int[i].adapter = adapter;
		adapter->ras_comp_int[i].num = i;
		adapter->ras_comp_int[i].desc_blob.data =
		    &adapter->ras_comps[i].description;
		adapter->ras_comp_int[i].desc_blob.size =
		    sizeof(adapter->ras_comps[i].description);

		/* Don't need to remember the dentry's because the debugfs dir
		 * gets removed recursively
		 */
		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i].desc_blob);
		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_size_ops);
		ent = debugfs_create_file("trace_level",
					  S_IRUGO |
					  (adapter->ras_comps[i].trace_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_level_ops);
		ent = debugfs_create_file("error_level",
					  S_IRUGO |
					  (adapter->
					   ras_comps[i].error_check_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &tracing_ops);
		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &paused_ops);
		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i],
					  &trace_ops);
	}
}
static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
					    struct ibmvnic_adapter *adapter)
{
	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;

	adapter->ras_comps = dma_alloc_coherent(dev, len,
						&adapter->ras_comps_tok,
						GFP_KERNEL);
	if (!adapter->ras_comps) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't alloc fw comps buffer\n");
		return;
	}

	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
					sizeof(struct ibmvnic_fw_comp_internal),
					GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
	newcrq.request_ras_comps.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &newcrq);
}
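/* Drop all commands still waiting for a response, releasing any buffers
 * they pinned. Called when the partition migrates or the partner fails,
 * since outstanding responses will never arrive on the old queue.
 */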
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
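/* Top level CRQ dispatcher: initialization requests and transport events
 * are handled inline; command responses fan out to the handle_*_rsp()
 * helpers above.
 */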
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (rc == 0)
				send_version_xchg(adapter);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
			rc = ibmvnic_reenable_crq_queue(adapter);
			if (rc)
				dev_err(dev, "Error after enable rc=%ld\n", rc);
			adapter->migrated = false;
			rc = ibmvnic_send_crq_init(adapter);
			if (rc)
				dev_err(dev, "Error sending init rc=%ld\n", rc);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case REQUEST_DUMP_SIZE_RSP:
		netdev_dbg(netdev, "Got Request Dump Size Response\n");
		handle_dump_size_rsp(crq, adapter);
		break;
	case REQUEST_DUMP_RSP:
		netdev_dbg(netdev, "Got Request Dump Response\n");
		complete(&adapter->fw_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		/* We're done with the queries, perform the login */
		send_login(adapter);
		break;
	case REQUEST_RAS_COMP_NUM_RSP:
		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
			netdev_dbg(netdev,
				   "Request RAS Comp Num not supported\n");
			break;
		}
		adapter->ras_comp_num =
		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
		handle_request_ras_comp_num_rsp(crq, adapter);
		break;
	case REQUEST_RAS_COMPS_RSP:
		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
		handle_request_ras_comps_rsp(crq, adapter);
		break;
	case CONTROL_RAS_RSP:
		netdev_dbg(netdev, "Got Control RAS Response\n");
		handle_control_ras_rsp(crq, adapter);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
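/* The interrupt handler drains the CRQ with interrupts disabled, then
 * re-enables them and checks the queue once more; a message that slipped
 * in during the window is handled before returning, so none are stranded.
 */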
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}
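/* Allocate one page of CRQ messages, DMA map it, and register it with the
 * hypervisor via H_REG_CRQ. H_RESOURCE usually means a previous instance
 * (e.g. after kexec) still owns the queue, so a close/reopen is attempted.
 */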
static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}
/* debugfs for dump */
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token,
			 adapter->dump_data_size, DMA_BIDIRECTIONAL);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
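/* Probe brings the adapter up in stages: register the CRQ, exchange init
 * messages and versions, negotiate capabilities (renegotiating with fewer
 * queues if the server rejects the login), and only then register the net
 * device.
 */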
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[16]; /* debugfs name buf */
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return -EINVAL;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		goto free_crq;
	}

	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
	ent = debugfs_create_dir(buf, NULL);
	if (!ent || IS_ERR(ent)) {
		dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO,
					  adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}
	ibmvnic_send_crq_init(adapter);

	init_completion(&adapter->init_done);
	wait_for_completion(&adapter->init_done);

	do {
		adapter->renegotiate = false;

		init_sub_crqs(adapter, 0);
		reinit_completion(&adapter->init_done);
		wait_for_completion(&adapter->init_done);

		if (adapter->renegotiate) {
			release_sub_crqs(adapter);
			send_cap_queries(adapter);

			reinit_completion(&adapter->init_done);
			wait_for_completion(&adapter->init_done);
		}
	} while (adapter->renegotiate);

	/* if init_sub_crqs is partially successful, retry */
	while (!adapter->tx_scrq || !adapter->rx_scrq) {
		init_sub_crqs(adapter, 1);

		reinit_completion(&adapter->init_done);
		wait_for_completion(&adapter->init_done);
	}

	netdev->real_num_tx_queues = adapter->req_tx_queues;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_debugfs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
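/* Estimate how much IO (DMA) space this device wants: the CRQ page, the
 * bounce buffer, the statistics buffer, the sub-CRQ message queues, and
 * every rx pool buffer, all rounded to IOMMU pages. The total is reported
 * to the VIO bus through the .get_desired_dma callback below.
 */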
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}
static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);