/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <linux/printk.h>

#include "qlcnic.h"
#define QLCNIC_TX_ETHER_PKT		0x01
#define QLCNIC_TX_TCP_PKT		0x02
#define QLCNIC_TX_UDP_PKT		0x03
#define QLCNIC_TX_IP_PKT		0x04
#define QLCNIC_TX_TCP_LSO		0x05
#define QLCNIC_TX_TCP_LSO6		0x06
#define QLCNIC_TX_ENCAP_PKT		0x07
#define QLCNIC_TX_ENCAP_LSO		0x08
#define QLCNIC_TX_TCPV6_PKT		0x0b
#define QLCNIC_TX_UDPV6_PKT		0x0c

#define QLCNIC_FLAGS_VLAN_TAGGED	0x10
#define QLCNIC_FLAGS_VLAN_OOB		0x40
#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port)	\
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode)	\
	((_desc)->flags_opcode |= \
	 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len)	\
	((_desc)->nfrags__length = \
	 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
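/* Example (illustrative, not in the original source):
 * qlcnic_set_tx_frags_len(desc, 3, 1514) stores
 * cpu_to_le32((1514 << 8) | 3) == cpu_to_le32(0x0005ea03), i.e. the
 * fragment count in bits 0-7 and the frame length in bits 8-31.
 */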
/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor bit layout:
 * 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
 * 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
 * 53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)
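/* Worked example (illustrative, not in the original source): a status word
 * sts_data = (3ULL << 53) | (0x40ULL << 12) decodes with the macros above
 * to qlcnic_get_sts_desc_cnt(sts_data) == 3 (bits 53-55) and
 * qlcnic_get_sts_totallength(sts_data) == 0x40 (bits 12-27).
 */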
#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	(((sts_data1) >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts)	((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TX_POLL_BUDGET		128
#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

#define qlcnic_83xx_pktln(sts)		(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		(((sts) >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	(((sts) >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		(((sts) >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)
static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
				   int max);
static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
					    struct qlcnic_host_rds_ring *,
					    u16, u16);

static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}
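/* Example (illustrative): the source MAC is copied into the low six bytes
 * of a u64, so on a little-endian host this XORs the first address octet
 * (mac & 0xff), the last octet ((mac >> 40) & 0xff) and the low VLAN byte
 * into one u8, which callers then mask down to a filter-hash bucket index.
 */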
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	if (qlcnic_83xx_check(adapter))
		return handle | (ring_id << 15);
	else
		return handle;
}
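/* Example (illustrative): on an 83xx adapter, handle 0x1a2 posted on RDS
 * ring 1 becomes 0x1a2 | (1 << 15) == 0x81a2, so the ring id travels in the
 * top bit of the 16-bit reference handle and comes back with the status.
 */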
static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
				      struct qlcnic_filter *fil,
				      void *addr, u16 vlan_id)
{
	int ret;
	u8 op;

	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (ret)
		return;

	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (!ret) {
		hlist_del(&fil->fnode);
		adapter->rx_fhash.fnum--;
	}
}

static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
						    void *addr, u16 vlan_id)
{
	struct qlcnic_filter *tmp_fil = NULL;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, addr) &&
		    tmp_fil->vlan_id == vlan_id)
			return tmp_fil;
	}

	return NULL;
}

static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
				 struct sk_buff *skb, int loopback_pkt,
				 u16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, op;
	int ret;

	if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
		vlan_id = 0;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr, vlan_id) &
		 (adapter->fhash.fbucket_size - 1);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		head = &(adapter->rx_fhash.fhead[hindex]);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			time = tmp_fil->ftime;
			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
				tmp_fil->ftime = jiffies;
			return;
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		head = &adapter->fhash.fhead[hindex];

		spin_lock(&adapter->mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&tmp_fil->fnode);
				adapter->fhash.fnum--;
			}

			spin_unlock(&adapter->mac_learn_lock);

			return;
		}

		spin_unlock(&adapter->mac_learn_lock);

		head = &adapter->rx_fhash.fhead[hindex];

		spin_lock(&adapter->rx_mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil)
			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
						  vlan_id);

		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}

void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       u16 vlan_id)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = cpu_to_le16(vlan_id);

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u16 protocol = ntohs(skb->protocol);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	struct hlist_node *n;
	u64 src_addr = 0;
	u16 vlan_id = 0;
	u8 hindex, hval;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->flags & QLCNIC_VLAN_FILTERING) {
		if (protocol == ETH_P_8021Q) {
			vh = (struct vlan_ethhdr *)skb->data;
			vlan_id = ntohs(vh->h_vlan_TCI);
		} else if (skb_vlan_tag_present(skb)) {
			vlan_id = skb_vlan_tag_get(skb);
		}
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hval = qlcnic_mac_hash(src_addr, vlan_id);
	hindex = hval & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
		adapter->stats.mac_filter_limit_overrun++;
		return;
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

#define QLCNIC_ENCAP_VXLAN_PKT		BIT_0
#define QLCNIC_ENCAP_OUTER_L3_IP6	BIT_1
#define QLCNIC_ENCAP_INNER_L3_IP6	BIT_2
#define QLCNIC_ENCAP_INNER_L4_UDP	BIT_3
#define QLCNIC_ENCAP_DO_L3_CSUM	BIT_4
#define QLCNIC_ENCAP_DO_L4_CSUM	BIT_5
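/* Example (illustrative): qlcnic_tx_encap_pkt() below ORs these flags into
 * the low bits of encap_descr, then packs the outer IP header length in
 * 32-bit words at bits 6-9 and the outer IP header offset at bits 10+. For
 * a VXLAN frame with a 20-byte outer IPv4 header at offset 14 this yields
 * QLCNIC_ENCAP_VXLAN_PKT | ((20 >> 2) << 6) | (14 << 10).
 */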
static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb,
			       struct qlcnic_host_tx_ring *tx_ring)
{
	u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
	int copied, copy_len, descr_size;
	u32 producer = tx_ring->producer;
	struct cmd_desc_type0 *hwdesc;
	u16 flags = 0, encap_descr = 0;

	opcode = QLCNIC_TX_ETHER_PKT;
	encap_descr = QLCNIC_ENCAP_VXLAN_PKT;

	if (skb_is_gso(skb)) {
		inner_hdr_len = skb_inner_transport_header(skb) +
				inner_tcp_hdrlen(skb) -
				skb_inner_mac_header(skb);

		/* VXLAN header size = 8 */
		outer_hdr_len = skb_transport_offset(skb) + 8 +
				sizeof(struct udphdr);
		first_desc->outer_hdr_length = outer_hdr_len;
		total_hdr_len = inner_hdr_len + outer_hdr_len;
		encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
			       QLCNIC_ENCAP_DO_L4_CSUM;
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = inner_hdr_len;

		/* Copy inner and outer headers in Tx descriptor(s)
		 * If total_hdr_len > cmd_desc_type0, use multiple
		 * descriptors
		 */
		copied = 0;
		descr_size = (int)sizeof(struct cmd_desc_type0);
		while (copied < total_hdr_len) {
			copy_len = min(descr_size, (total_hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc,
							 copy_len);
			copied += copy_len;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;

		/* Make sure updated tx_ring->producer is visible
		 * for qlcnic_tx_avail()
		 */
		smp_mb();
		adapter->stats.encap_lso_frames++;

		opcode = QLCNIC_TX_ENCAP_LSO;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (inner_ip_hdr(skb)->version == 6) {
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		} else {
			if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		}

		adapter->stats.encap_tx_csummed++;
		opcode = QLCNIC_TX_ENCAP_PKT;
	}

	/* Prepare first 16 bits of byte offset 16 of Tx descriptor */
	if (ip_hdr(skb)->version == 6)
		encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;

	/* outer IP header's size in 32bit words size */
	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;

	/* outer IP header offset */
	encap_descr |= skb_network_offset(skb) << 10;
	first_desc->encap_descr = cpu_to_le16(encap_descr);

	first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
				     skb->data;
	first_desc->ip_hdr_offset = skb_inner_network_offset(skb);

	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
			 struct qlcnic_host_tx_ring *tx_ring)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = QLCNIC_FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (skb_vlan_tag_present(skb)) {
		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = skb_vlan_tag_get(skb);
	}
	if (unlikely(adapter->tx_pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = adapter->tx_pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = QLCNIC_TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
						    QLCNIC_TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring */
		copied = 0;
		offset = 2;

		if (flags & QLCNIC_FLAGS_VLAN_OOB) {
			first_desc->hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= QLCNIC_FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);

			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;
	u16 protocol;
	bool l4_is_udp = false;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_tx_stop_all_queues(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
	num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_tx_stop_queue(tx_ring->txq);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_tx_start_queue(tx_ring->txq);
		} else {
			tx_ring->tx_stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	protocol = ntohs(skb->protocol);
	if (protocol == ETH_P_IP)
		l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
	else if (protocol == ETH_P_IPV6)
		l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;

	/* Check if it is a VXLAN packet */
	if (!skb->encapsulation || !l4_is_udp ||
	    !qlcnic_encap_tx_offload(adapter)) {
		if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
					   tx_ring)))
			goto unwind_buff;
	} else {
		if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
						 skb, tx_ring)))
			goto unwind_buff;
	}

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb);

	tx_ring->tx_stats.tx_bytes += skb->len;
	tx_ring->tx_stats.xmit_called++;

	/* Ensure writes are complete before HW fetches Tx descriptors */
	wmb();
	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);

	adapter->stats.txdropped++;
drop_packet:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
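/* Example (illustrative): with frag_count == 6 the descriptor loop above
 * fills addr_buffer1..4 of the first cmd_desc_type0 (i = 0..3, k = i % 4),
 * then moves to a fresh descriptor, clears it, and fills addr_buffer1..2
 * for the remaining two fragments (i = 4..5).
 */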
void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		netif_carrier_off(netdev);
	} else if (!adapter->ahw->linkup && linkup) {
		adapter->ahw->linkup = 1;

		/* Do not advertise Link up to the stack if device
		 * is in loopback mode
		 */
		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
			netdev_info(netdev, "NIC Link is up for loopback test\n");
			return;
		}

		netdev_info(netdev, "NIC Link is up\n");
		netif_carrier_on(netdev);
	}
}
static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data,
			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}
		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&tx_ring->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}
			tx_ring->tx_stats.xmit_finished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	tx_ring->sw_consumer = sw_consumer;

	if (count && netif_running(netdev)) {
		smp_mb();
		if (netif_tx_queue_stopped(tx_ring->txq) &&
		    netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_tx_wake_queue(tx_ring->txq);
				tx_ring->tx_stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);

	spin_unlock(&tx_ring->tx_clean_lock);

	return done;
}
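/* Illustrative sketch (an assumption mirroring get_next_index() from
 * qlcnic.h, not part of this file): the producer/consumer arithmetic above
 * relies on an increment-and-wrap helper of this shape, so that
 * sw_consumer == hw_consumer means "ring empty".
 */
static inline u32 qlcnic_example_next_index(u32 index, u32 ring_size)
{
	return (++index == ring_size) ? 0 : index;	/* wrap to slot 0 */
}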
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_ring = sds_ring->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
			qlcnic_enable_sds_intr(adapter, sds_ring);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}

	return work_done;
}

static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;

	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	} else {
		/* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/
		work_done = budget;
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}
static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
			     lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}
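/* Example (illustrative): msg->body[2] == 0x0000000001010201ULL decodes
 * with the shifts above to link_status = 0x01 (link up), module = 0x02,
 * duplex = 0x01, autoneg = 0x01 and lb_status = 0 (bits 32-33).
 */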
static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -EINPROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -ENODEV;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -EIO;
			break;
		}
		break;
	case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
		break;
	default:
		break;
	}
}

static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
					    struct qlcnic_host_rds_ring *ring,
					    u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->rx_pvid)
		return 0;

	if (*vlan_tag == adapter->rx_pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}
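/* Worked example (illustrative): for an IPv4 LRO aggregate with ihl = 5,
 * doff = 5 and lro_length = 2920, the fix-up above writes
 * tot_len = 5 * 4 + 5 * 4 + 2920 = 2960 and repairs the IP checksum with
 * csum_replace2(), so the stack sees one well-formed aggregated segment.
 */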
static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	u8 ring;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		default:
			goto skip;
		}
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
					       ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}
static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
		char prefix[30];

		scnprintf(prefix, sizeof(prefix), "%s: %s: ",
			  dev_name(&adapter->pdev->dev), __func__);

		print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
				     skb->data, skb->len, true);
	}
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;

	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test) {
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       NAPI_POLL_WEIGHT);
		} else {
			if (ring == (adapter->drv_sds_rings - 1))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_rx_poll,
					       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
					  NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if (qlcnic_check_multi_tx(adapter) &&
	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test &&
	    qlcnic_check_multi_tx(adapter)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}
#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)

static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
	if (lro_pkt)
		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
	else
		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

#define QLCNIC_ENCAP_LENGTH_MASK	0x7f

static inline u8 qlcnic_encap_length(u64 sts_data)
{
	return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
}
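/* Example (illustrative): bits 0-6 of the second 83xx status word carry the
 * encapsulated-header length, so qlcnic_encap_length(sts_data[1]) != 0
 * marks a tunnelled frame; qlcnic_83xx_process_rcv() below then raises
 * skb->csum_level to record that the inner checksum was also verified.
 */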
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, is_lb_pkt;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_83xx_pktln(sts_data[0]);
	cksum  = qlcnic_83xx_csum_status(sts_data[1]);
	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (qlcnic_encap_length(sts_data[1]) &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->csum_level = 1;
		adapter->stats.encap_rx_csummed++;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}
static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}
	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}
	return count;
}
static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	budget = QLCNIC_TX_POLL_BUDGET;
	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	} else {
		/* need a repoll */
		work_done = budget;
	}

	return work_done;
}

static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_rx_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_msix_sriov_vf_poll,
					       NAPI_POLL_WEIGHT);

		} else {
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_tx_napi_add(netdev, &tx_ring->napi,
					  qlcnic_83xx_msix_tx_poll,
					  NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}
void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
					 int ring, u64 sts_data[])
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];
	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	length = qlcnic_83xx_pktln(sts_data[0]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
}

void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data[2];
	int ring, opcode;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
	opcode = qlcnic_83xx_opcode(sts_data[1]);
	if (!opcode)
		return;

	ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
	desc = &sds_ring->desc_head[consumer];
	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);
	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}