/* drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c */
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>

#include "qlcnic.h"

#define QLCNIC_TX_ETHER_PKT	0x01
#define QLCNIC_TX_TCP_PKT	0x02
#define QLCNIC_TX_UDP_PKT	0x03
#define QLCNIC_TX_IP_PKT	0x04
#define QLCNIC_TX_TCP_LSO	0x05
#define QLCNIC_TX_TCP_LSO6	0x06
#define QLCNIC_TX_ENCAP_PKT	0x07
#define QLCNIC_TX_ENCAP_LSO	0x08
#define QLCNIC_TX_TCPV6_PKT	0x0b
#define QLCNIC_TX_UDPV6_PKT	0x0c

#define QLCNIC_FLAGS_VLAN_TAGGED	0x10
#define QLCNIC_FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

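/*
 * Editor's sketch (illustrative only, not part of the driver; the helper
 * name is hypothetical and the block is kept under #if 0 so it is never
 * compiled): how the setters above pack the first Tx descriptor for a
 * hypothetical 3-fragment, 1514-byte TCP packet on port 0.
 */
#if 0
static void example_fill_first_desc(struct cmd_desc_type0 *desc)
{
	/* frags in bits 0-7, length in bits 8-31:
	 * 3 | (1514 << 8) = 0x0005ea03 (before the cpu_to_le32 swap)
	 */
	qlcnic_set_tx_frags_len(desc, 3, 1514);
	/* port in the low nibble, context id in the high nibble */
	qlcnic_set_tx_port(desc, 0);
	/* flags occupy bits 0-6, opcode bits 7-12 of flags_opcode */
	qlcnic_set_tx_flags_opcode(desc, 0, QLCNIC_TX_TCP_PKT);
}
#endif
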
/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

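/*
 * Editor's sketch (made-up field values, never compiled): decoding one
 * status word with the extractors above, following the bit layout in the
 * comment at the top of this block.
 */
#if 0
static void example_decode_sts(void)
{
	u64 sts = (0x04ULL << 58) |	/* opcode: QLCNIC_RXPKT_DESC */
		  STATUS_OWNER_HOST |	/* owner, bits 56-57 */
		  (1ULL << 53) |	/* desc_cnt: 1 */
		  (5ULL << 28) |	/* reference_handle: 5 */
		  (60ULL << 12);	/* total_length: 60 */

	WARN_ON(qlcnic_get_sts_opcode(sts) != 0x04);
	WARN_ON(qlcnic_get_sts_desc_cnt(sts) != 1);
	WARN_ON(qlcnic_get_sts_refhandle(sts) != 5);
	WARN_ON(qlcnic_get_sts_totallength(sts) != 60);
}
#endif
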
#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TX_POLL_BUDGET		128
#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE	(QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		((sts >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)

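/*
 * Editor's sketch (made-up values, never compiled): the 83xx status layout
 * differs from the 82xx one above; sts here stands for sts_data[0] as used
 * by qlcnic_83xx_process_rcv() later in this file.
 */
#if 0
static void example_decode_83xx_sts(void)
{
	u64 sts = (7ULL << 48) |	/* buffer handle, bits 48-62 */
		  (1514ULL << 32);	/* packet length, bits 32-45 */

	WARN_ON(qlcnic_83xx_hndl(sts) != 7);
	WARN_ON(qlcnic_83xx_pktln(sts) != 1514);
}
#endif
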
static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
				   int max);

static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
					    struct qlcnic_host_rds_ring *,
					    u16, u16);

static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}

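/*
 * Editor's note (worked example, not driver code): the hash XORs the
 * lowest and highest bytes of the 6-byte MAC (as memcpy'd into a u64 on a
 * little-endian host) with the low VLAN byte. Callers mask the result with
 * (fbucket_size - 1), so the bucket count must be a power of two.
 *
 *	u64 mac = 0xccbbaa211b00ULL;	 00:1b:21:aa:bb:cc after LE memcpy
 *	qlcnic_mac_hash(mac, 100);	 0x00 ^ 0xcc ^ 0x64 = 0xa8
 */
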
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	if (qlcnic_83xx_check(adapter))
		return handle | (ring_id << 15);
	else
		return handle;
}

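/*
 * Editor's note (illustrative): on 83xx the 15-bit buffer handle is
 * combined with the RDS ring id at bit 15, matching the 15-bit
 * qlcnic_83xx_hndl() extractor above; e.g. handle 0x12 on ring 1 yields
 * 0x8012. On 82xx the handle is passed through unchanged.
 */
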
static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
				      struct qlcnic_filter *fil,
				      void *addr, u16 vlan_id)
{
	int ret;
	u8 op;

	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (ret)
		return;

	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (!ret) {
		hlist_del(&fil->fnode);
		adapter->rx_fhash.fnum--;
	}
}

static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
						    void *addr, u16 vlan_id)
{
	struct qlcnic_filter *tmp_fil = NULL;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, addr) &&
		    tmp_fil->vlan_id == vlan_id)
			return tmp_fil;
	}

	return NULL;
}

static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
				 struct sk_buff *skb, int loopback_pkt,
				 u16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, op;
	int ret;

	if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
		vlan_id = 0;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr, vlan_id) &
		 (adapter->fhash.fbucket_size - 1);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		head = &(adapter->rx_fhash.fhead[hindex]);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			time = tmp_fil->ftime;
			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
				tmp_fil->ftime = jiffies;
			return;
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		head = &adapter->fhash.fhead[hindex];

		spin_lock(&adapter->mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&tmp_fil->fnode);
				adapter->fhash.fnum--;
			}

			spin_unlock(&adapter->mac_learn_lock);

			return;
		}

		spin_unlock(&adapter->mac_learn_lock);

		head = &adapter->rx_fhash.fhead[hindex];

		spin_lock(&adapter->rx_mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil)
			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
						  vlan_id);

		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}

void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       u16 vlan_id)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = cpu_to_le16(vlan_id);

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u16 protocol = ntohs(skb->protocol);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	struct hlist_node *n;
	u64 src_addr = 0;
	u16 vlan_id = 0;
	u8 hindex, hval;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->flags & QLCNIC_VLAN_FILTERING) {
		if (protocol == ETH_P_8021Q) {
			vh = (struct vlan_ethhdr *)skb->data;
			vlan_id = ntohs(vh->h_vlan_TCI);
		} else if (vlan_tx_tag_present(skb)) {
			vlan_id = vlan_tx_tag_get(skb);
		}
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hval = qlcnic_mac_hash(src_addr, vlan_id);
	hindex = hval & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
		adapter->stats.mac_filter_limit_overrun++;
		return;
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

#define QLCNIC_ENCAP_VXLAN_PKT		BIT_0
#define QLCNIC_ENCAP_OUTER_L3_IP6	BIT_1
#define QLCNIC_ENCAP_INNER_L3_IP6	BIT_2
#define QLCNIC_ENCAP_INNER_L4_UDP	BIT_3
#define QLCNIC_ENCAP_DO_L3_CSUM	BIT_4
#define QLCNIC_ENCAP_DO_L4_CSUM	BIT_5

static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb,
			       struct qlcnic_host_tx_ring *tx_ring)
{
	u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
	int copied, copy_len, descr_size;
	u32 producer = tx_ring->producer;
	struct cmd_desc_type0 *hwdesc;
	u16 flags = 0, encap_descr = 0;

	opcode = QLCNIC_TX_ETHER_PKT;
	encap_descr = QLCNIC_ENCAP_VXLAN_PKT;

	if (skb_is_gso(skb)) {
		inner_hdr_len = skb_inner_transport_header(skb) +
				inner_tcp_hdrlen(skb) -
				skb_inner_mac_header(skb);

		/* VXLAN header size = 8 */
		outer_hdr_len = skb_transport_offset(skb) + 8 +
				sizeof(struct udphdr);
		first_desc->outer_hdr_length = outer_hdr_len;
		total_hdr_len = inner_hdr_len + outer_hdr_len;
		encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
			       QLCNIC_ENCAP_DO_L4_CSUM;
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = inner_hdr_len;

		/* Copy inner and outer headers in Tx descriptor(s)
		 * If total_hdr_len > cmd_desc_type0, use multiple
		 * descriptors
		 */
		copied = 0;
		descr_size = (int)sizeof(struct cmd_desc_type0);
		while (copied < total_hdr_len) {
			copy_len = min(descr_size, (total_hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc,
							 copy_len);
			copied += copy_len;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;

		/* Make sure updated tx_ring->producer is visible
		 * for qlcnic_tx_avail()
		 */
		smp_mb();
		adapter->stats.encap_lso_frames++;

		opcode = QLCNIC_TX_ENCAP_LSO;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (inner_ip_hdr(skb)->version == 6) {
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		} else {
			if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		}

		adapter->stats.encap_tx_csummed++;
		opcode = QLCNIC_TX_ENCAP_PKT;
	}

	/* Prepare first 16 bits of byte offset 16 of Tx descriptor */
	if (ip_hdr(skb)->version == 6)
		encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;

	/* outer IP header's size in 32-bit words */
	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;

	/* outer IP header offset */
	encap_descr |= skb_network_offset(skb) << 10;
	first_desc->encap_descr = cpu_to_le16(encap_descr);

	first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
				     skb->data;
	first_desc->ip_hdr_offset = skb_inner_network_offset(skb);

	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

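/*
 * Editor's note (summary of the packing above, values illustrative):
 * encap_descr ends up laid out as
 *
 *	bits 0-5   flag bits (VXLAN, outer/inner L3/L4 hints, csum requests)
 *	bits 6-9   outer IP header length in 32-bit words
 *	bits 10-15 outer IP header offset
 *
 * so an outer IPv4 header (20 bytes at offset 14) contributes
 * ((20 >> 2) << 6) | (14 << 10) = 0x3940 before the flag bits are OR'd in.
 */
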
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
			 struct qlcnic_host_tx_ring *tx_ring)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = QLCNIC_FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}
	if (unlikely(adapter->tx_pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = adapter->tx_pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = QLCNIC_TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
						    QLCNIC_TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring */
		copied = 0;
		offset = 2;

		if (flags & QLCNIC_FLAGS_VLAN_OOB) {
			first_desc->hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= QLCNIC_FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);
			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}

netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;
	u16 protocol;
	bool l4_is_udp = false;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_tx_stop_all_queues(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
	num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_tx_stop_queue(tx_ring->txq);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_tx_start_queue(tx_ring->txq);
		} else {
			tx_ring->tx_stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	protocol = ntohs(skb->protocol);
	if (protocol == ETH_P_IP)
		l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
	else if (protocol == ETH_P_IPV6)
		l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;

	/* Check if it is a VXLAN packet */
	if (!skb->encapsulation || !l4_is_udp ||
	    !qlcnic_encap_tx_offload(adapter)) {
		if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
					   tx_ring)))
			goto unwind_buff;
	} else {
		if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
						 skb, tx_ring)))
			goto unwind_buff;
	}

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb);

	tx_ring->tx_stats.tx_bytes += skb->len;
	tx_ring->tx_stats.xmit_called++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		netif_carrier_off(netdev);
	} else if (!adapter->ahw->linkup && linkup) {
		adapter->ahw->linkup = 1;

		/* Do not advertise Link up to the stack if device
		 * is in loopback mode
		 */
		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
			netdev_info(netdev, "NIC Link is up for loopback test\n");
			return;
		}

		netdev_info(netdev, "NIC Link is up\n");
		netif_carrier_on(netdev);
	}
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data,
			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}
		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}

static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&tx_ring->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}
			tx_ring->tx_stats.xmit_finished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	tx_ring->sw_consumer = sw_consumer;

	if (count && netif_running(netdev)) {
		smp_mb();
		if (netif_tx_queue_stopped(tx_ring->txq) &&
		    netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_tx_wake_queue(tx_ring->txq);
				tx_ring->tx_stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);

	spin_unlock(&tx_ring->tx_clean_lock);

	return done;
}

static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_ring = sds_ring->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
			qlcnic_enable_sds_intr(adapter, sds_ring);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}

	return work_done;
}

static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;

	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
			     lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}

static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -EINPROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -ENODEV;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -EIO;
			break;
		}
		break;
	case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
		break;
	default:
		break;
	}
}

static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
					    struct qlcnic_host_rds_ring *ring,
					    u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->rx_pvid)
		return 0;

	if (*vlan_tag == adapter->rx_pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}

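/*
 * Editor's note (illustrative, not driver code): outcomes of
 * qlcnic_check_rx_tagging() for a hypothetical rx_pvid of 100:
 *
 *	tag 100 (matches PVID)		-> *vlan_tag = 0xffff, return 0
 *					   (packet takes the untagged path)
 *	tag 200, TAGGING_ENABLED set	-> return 0 (tag passed up as-is)
 *	tag 200, TAGGING_ENABLED clear	-> return -EINVAL (caller drops it)
 */
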
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)

static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}

static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	u8 ring;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);
		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		default:
			goto skip;
		}
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
					       ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}

static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *data = skb->data;

	pr_info("\n");
	for (i = 0; i < skb->len; i++) {
		QLCDB(adapter, DRV, "%02x ", data[i]);
		if ((i & 0x0f) == 8)
			pr_info("\n");
	}
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;

	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}

int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test) {
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       NAPI_POLL_WEIGHT);
		} else {
			if (ring == (adapter->drv_sds_rings - 1))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_rx_poll,
					       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if (qlcnic_check_multi_tx(adapter) &&
	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !adapter->ahw->diag_test &&
	    qlcnic_check_multi_tx(adapter)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)

static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
	if (lro_pkt)
		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
	else
		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

#define QLCNIC_ENCAP_LENGTH_MASK	0x7f

static inline u8 qlcnic_encap_length(u64 sts_data)
{
	return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
}

4be41e92
SC
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, is_lb_pkt;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_83xx_pktln(sts_data[0]);
	cksum = qlcnic_83xx_csum_status(sts_data[1]);
	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (qlcnic_encap_length(sts_data[1]) &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->encapsulation = 1;
		adapter->stats.encap_rx_csummed++;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

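/* Process one LRO descriptor: the firmware has coalesced several TCP
 * segments into this buffer, so rewrite the IP/TCP headers (total
 * length, IPv4 checksum, PSH bit) to describe the aggregate before
 * delivery.  If the firmware reports an MSS, gso_size/gso_type are
 * seeded so the super-frame can be re-segmented on forwarding.
 */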
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff;
	int err;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	err = qlcnic_check_rx_tagging(adapter, skb, &vid);

	if (adapter->rx_mac_learn) {
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
	}

	if (unlikely(err)) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;
	return buffer;
}

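/* Drain up to @max status descriptors from an 83xx SDS ring,
 * dispatching each to the regular or LRO handler, then refill and
 * repost the Rx buffers and publish the new consumer index.
 */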
static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}
	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}
	return count;
}

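/* NAPI poll for SR-IOV VFs using MSI-X with a Tx/Rx shared vector:
 * the single Tx ring is serviced together with the Rx ring, and the
 * interrupt is re-armed only when both are done within budget.
 */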
static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

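/* Combined Tx/Rx NAPI poll used when MSI-X is not available; same
 * flow as the VF poll above, registered for the legacy interrupt.
 */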
static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

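/* Tx-only NAPI poll for dedicated Tx MSI-X vectors.  Note the caller's
 * budget is deliberately replaced with QLCNIC_TX_POLL_BUDGET.
 */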
static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	budget = QLCNIC_TX_POLL_BUDGET;
	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	}

	return work_done;
}

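/* Rx-only NAPI poll for SDS MSI-X vectors when Tx has its own
 * vectors; re-arms the Rx interrupt once work drops below budget.
 */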
static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

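/* Enable NAPI on all active 83xx rings.  SDS interrupts are unmasked
 * here only in MSI-X mode; separate Tx NAPI contexts exist only when
 * Tx does not share the Rx vector (QLCNIC_TX_INTR_SHARED clear).
 */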
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}
}

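/* Mirror of qlcnic_83xx_napi_enable(): mask, synchronize, and disable
 * NAPI on every ring that was enabled above.
 */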
void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_disable_sds_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

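/* Allocate SDS/Tx ring state and register the poll handler matching
 * the interrupt mode: Rx-only polls with dedicated Tx vectors, the
 * shared-vector VF poll, or the combined poll for non-MSI-X setups.
 */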
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_rx_poll,
					       NAPI_POLL_WEIGHT);
			else
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_msix_sriov_vf_poll,
					       NAPI_POLL_WEIGHT);
		} else {
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_add(netdev, &tx_ring->napi,
				       qlcnic_83xx_msix_tx_poll,
				       NAPI_POLL_WEIGHT);
		}
	}

	return 0;
}

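/* Unwind qlcnic_83xx_napi_add().  These add/del helpers are normally
 * paired with the enable/disable calls above across interface
 * open/close (see qlcnic_main.c).
 */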
void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

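/* Diagnostic-mode receive: rather than delivering the frame, check it
 * against the expected loopback-test pattern and bump diag_cnt on a
 * match; anything else is dumped for debugging.
 */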
static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
					 int ring, u64 sts_data[])
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];
	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	length = qlcnic_83xx_pktln(sts_data[0]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
}

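/* Diagnostic-mode SDS handler: consume a single status descriptor,
 * run it through the diag receive path, and hand the descriptor back
 * to the firmware by restoring phantom ownership.
 */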
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data[2];
	int ring, opcode;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
	opcode = qlcnic_83xx_opcode(sts_data[1]);
	if (!opcode)
		return;

	ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
	desc = &sds_ring->desc_head[consumer];
	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);
	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}