/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"
static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME
	" v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;

	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
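
/* The helpers below have no direct register access of their own; each
 * request is packaged into a struct drv_ctl_info and proxied to the
 * bnx2/bnx2x ethernet driver through the ethdev->drv_ctl() callback.
 */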
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
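
/* Reserve a specific id.  Returns -1 if the id is out of range or has
 * already been allocated.
 */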
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
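
/* The search is circular: scan from ->next to the end of the table, then
 * wrap around to the beginning.  The "& (max - 1)" advance of ->next
 * assumes the table size is a power of 2.
 */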
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
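
/* Fill the hardware page table with the DMA address of each page.  Per the
 * in-line comments, bnx2 devices expect big endian entries (high dword
 * first) while bnx2x devices expect little endian; cp->setup_pgtbl points
 * to the variant appropriate for the device.
 */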
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}
static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
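
/* Allocate the KCQ ring.  On bnx2x devices the last entry of each page is
 * a bnx2x_bd_chain_next pointer to the following page, and the last page
 * links back to page 0, making the queue a ring.
 */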
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		return -ENOMEM;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
}
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;

		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		goto error;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
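
/* Number of free KWQ entries: the ring distance from consumer to producer,
 * subtracted from the ring size.  max_kwq_idx doubles as the index mask,
 * so the ring size must be a power of 2.
 */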
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
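
/* Build a single type-16 work queue entry and hand it to the bnx2x driver.
 * drv_submit_kwqes_16() returns the number of entries accepted, so a
 * return of 1 means success here.
 */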
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
				u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					  cqes, num_cqes);
	}
	rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
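
/* Return a pointer (and DMA address) to the per-connection context memory
 * for @cid, compensating for the extra alignment padding added when
 * ctx_align is non-zero (the 57710 case set up in
 * cnic_alloc_bnx2x_context()).
 */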
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid, type;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
		& SPE_HDR_CONN_TYPE;
	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, type, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						  cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
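
/* Ring index helpers.  The bnx2x variants skip the last entry of each
 * page, which is reserved for the next-page pointer of the KCQ ring set
 * up in cnic_alloc_kcq().
 */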
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = cp->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != *cp->kcq1.status_idx_ptr) {
			status_idx = (u16) *cp->kcq1.status_idx_ptr;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
		} else
			break;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}
static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
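/* Interrupt entry points.  The hard IRQ/MSI-X handler only prefetches
 * the status block and the next expected KCQE, then schedules the
 * tasklet; all KCQ processing happens in the tasklet (bottom half).
 */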
static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}
static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();
		if (last_status == *info->status_idx_ptr)
			break;

		last_status = *info->status_idx_ptr;
	}
	return last_status;
}

static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);
	else
		cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
				   status_idx, IGU_INT_ENABLE, 1);
}
static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}
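/* Stop/start all registered ULPs.  ULP_F_CALL_PENDING is set under
 * cnic_lock before the ops are invoked so that a concurrent ULP
 * unregister can see the call in progress and wait for it to finish.
 */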
static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_COMPLETION_CMD: {
		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}
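/* Build and submit the OFFLOAD_PG KWQE.  The PG context carries the L2
 * path for the connection: destination and source MACs, ethertype, an
 * optional VLAN tag, and the starting IP ID.
 */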
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
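/* Build the TCP connect request.  IPv4 connects use two KWQEs (req1 and
 * req3); IPv6 connects insert req2 in between to carry the upper 96
 * bits of the source and destination addresses, for a total of three.
 */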
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}
static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}
static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}
static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}
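/* Route lookup helpers.  Note that both lookups are done in init_net
 * only, so offloaded connections are not network-namespace aware.
 */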
static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct flowi fl;
	int err;
	struct rtable *rt;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->dst;
	return err;
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}
static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}
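/* Resolve the L3 route and reserve a local TCP port.  A caller-supplied
 * port inside [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX) is claimed
 * from the port table if free; otherwise a new port is allocated.
 */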
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	u32 local_port;
	int port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}
static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	int err = 0;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}
static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode)
		return -EALREADY;

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	} else {
		return -EALREADY;
	}
	return 0;
}
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};
static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP, we accept any event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}
static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}
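/* Initialize the bnx2 connection manager: write a random seed into the
 * chip (context 45, offset 0), presumably used by the hardware for TCP
 * initial sequence number generation.
 */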
static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}
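/* Delayed-work handler that reaps offloaded connections queued for
 * deletion.  Each context gets a 2 second grace period after its
 * timestamp before the destroy ramrod is issued; contexts that are not
 * ready yet cause the work to be rescheduled 10 ms later.
 */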
static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
			atomic_dec(&cp->iscsi_conn);

		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));
}
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}
static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8)  << 4;
	else
		val |= (BCM_PAGE_BITS - 8)  << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		(u16 *) &sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			(u16 *) &msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}
static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}
static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 12);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int port = CNIC_PORT(cp);
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_XSTRORM_INTMEM +
		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	int port = CNIC_PORT(cp);
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
	data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);

	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->rx.outer_vlan_removal_enable_flg = 1;

	/* reset tstorm and ustorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_TSTRORM_INTMEM +
		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);

		val = BAR_USTRORM_INTMEM +
		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}
static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
				     u32 lower_addr)
{
	u32 val;
	u8 mac[6];

	val = CNIC_RD(dev, upper_addr);

	mac[0] = (u8) (val >> 8);
	mac[1] = (u8) val;

	val = CNIC_RD(dev, lower_addr);

	mac[2] = (u8) (val >> 24);
	mac[3] = (u8) (val >> 16);
	mac[4] = (u8) (val >> 8);
	mac[5] = (u8) val;

	if (is_valid_ether_addr(mac)) {
		memcpy(dev->mac_addr, mac, 6);
		return 0;
	} else {
		return -EINVAL;
	}
}
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, base2, addr, addr1, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	if (base == 0)
		return;

	base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
			      MISC_REG_GENERIC_CR_0));
	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_upper);

	addr1 = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_lower);

	cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);
		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}
	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
		int func = CNIC_FUNC(cp);
		u32 mf_cfg_addr;

		if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
			mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
					      mf_cfg_addr));
		else
			mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;

		if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
			/* Must determine if the MF is SD vs SI mode */
			addr = BNX2X_SHMEM_ADDR(base,
					dev_info.shared_feature_config.config);
			val = CNIC_RD(dev, addr);
			if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
			    SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
				int rc;

				/* MULTI_FUNCTION_SI mode */
				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].func_cfg);
				val = CNIC_RD(dev, addr);
				if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
					dev->max_iscsi_conn = 0;

				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_upper);
				addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_lower);
				rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
							       addr1);
				if (rc && func > 1)
					dev->max_iscsi_conn = 0;

				return;
			}
		}

		addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
			func_mf_config[func].e1hov_tag);

		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
				func_mf_config[func].config);
			val = CNIC_RD(dev, addr);
			val &= FUNC_MF_CFG_PROTOCOL_MASK;
			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
				dev->max_iscsi_conn = 0;
		}
	}
	if (!is_valid_ether_addr(dev->mac_addr))
		dev->max_iscsi_conn = 0;
}
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret, i;
	u32 pfid;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);

		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val)
			cp->pfid = func >> 1;
		else
			cp->pfid = func & 0x6;
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid);

	if (ret)
		return -ENOMEM;

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}

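/* Bring up the iSCSI L2 rings.  On bnx2x this builds the
 * client_init_ramrod_data in the UIO buffer, posts a CLIENT_SETUP
 * ramrod whose 16-bit type field packs the connection type and PF id
 * (see the SPE_HDR_* masks), and then polls CNIC_LCL_FL_L2_WAIT, which
 * the completion path is expected to clear.  The wait is bounded, so a
 * lost completion logs an error instead of hanging.
 */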
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id, type;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
				    cid, type, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
	}
}

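/* Tear the iSCSI L2 rings back down in roughly the reverse order:
 * stop the ring, post ETH_HALT and wait for it, then release the
 * connection with a COMMON_CFC_DEL ramrod.
 */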
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;
		u32 type;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
				    cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				    cid, type, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}

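/* Attach this cnic instance to the underlying hardware driver via its
 * drv_register_cnic() hook.  Registration is presumably what makes the
 * hardware driver start forwarding its status-block events to the
 * cnic_service_* handlers installed in cnic_ops.
 */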
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

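/* Common bring-up path for both chip families.  All chip-specific work
 * is reached through the alloc_resc/start_hw/enable_int function
 * pointers that init_bnx2_cnic()/init_bnx2x_cnic() install below.
 */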
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

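/* Chip-specific tear-down helpers.  Both variants quiesce the
 * interrupt path first and finish by freeing the DMA resources.
 */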
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

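/* Last stage of device removal.  The bounded wait below gives any
 * remaining cnic_hold() holders roughly a second to drop their
 * references; the structure is released afterwards either way, so the
 * error message flags a leaked reference rather than preventing the
 * free.
 */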
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

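/* The cnic_dev and its cnic_local private area are carved out of a
 * single kzalloc() block; cnic_priv simply points just past the
 * cnic_dev header.
 */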
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		/* 5709 A0 and A1 */
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}

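/* Match a netdev to one of the supported drivers by its ethtool driver
 * name and, on success, allocate the cnic device and put it on the
 * global list.
 */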
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call	= cnic_netdev_event
};

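/* Module-unload helper: stop and free every cnic device and UIO
 * device remaining on the global lists.
 */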
static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);