/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME	"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
57 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
58 "Chen (zongxi@broadcom.com");
59 MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
60 MODULE_LICENSE("GPL");
61 MODULE_VERSION(CNIC_MODULE_VERSION
);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
	if (udev->uio_dev != -1)
	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
	udev->uio_dev = iminor(inode);
	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
	struct cnic_uio_dev *udev = uinfo->priv;

static inline void cnic_hold(struct cnic_dev *dev)
	atomic_inc(&dev->ref_count);

static inline void cnic_put(struct cnic_dev *dev)
	atomic_dec(&dev->ref_count);

static inline void csk_hold(struct cnic_sock *csk)
	atomic_inc(&csk->ref_count);

static inline void csk_put(struct cnic_sock *csk)
	atomic_dec(&csk->ref_count);

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			read_unlock(&cnic_dev_lock);
	read_unlock(&cnic_dev_lock);

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
	atomic_inc(&ulp_ops->ref_count);

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
	atomic_dec(&ulp_ops->ref_count);

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	ethdev->drv_ctl(dev->netdev, &info);

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	ethdev->drv_ctl(dev->netdev, &info);

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

		info.cmd = DRV_CTL_START_L2_CMD;
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	ethdev->drv_ctl(dev->netdev, &info);

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	ethdev->drv_ctl(dev->netdev, &info);

static int cnic_in_use(struct cnic_sock *csk)
	return test_bit(SK_F_INUSE, &csk->flags);

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
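/* Build an iscsi_path message in path_req and hand it to the registered
 * iSCSI ULP's iscsi_nl_send_msg() handler; used to deliver
 * ISCSI_KEVENT_PATH_REQ and ISCSI_KEVENT_IF_DOWN events for the
 * connection described by csk.
 */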
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
	struct iscsi_path path_req;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)

	len = sizeof(path_req);
	buf = (char *) &path_req;
	memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;

		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))

		path_resp = (struct iscsi_path *) buf;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)

		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
		csk = &cp->csk_tbl[l5_cid];
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha)) {
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				   !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);

static int cnic_offld_prep(struct cnic_sock *csk)
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);

static int cnic_close_prep(struct cnic_sock *csk)
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))

static int cnic_abort_prep(struct cnic_sock *csk)
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
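/* Register a ULP (e.g. bnx2i or bnx2fc) with the CNIC core: publish its
 * ops in cnic_ulp_tbl under cnic_lock, then call its cnic_init() once for
 * each device already present on cnic_dev_list.
 */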
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		mutex_unlock(&cnic_lock);

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
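/* Reverse of cnic_register_driver(): clear the cnic_ulp_tbl entry after
 * verifying that no device still has this ULP type registered, then wait
 * with bounded retries for the ops ref_count to drop to zero.
 */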
int cnic_unregister_driver(int ulp_type)
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
		pr_err("%s: Type %d has not been registered\n",

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			read_unlock(&cnic_dev_lock);
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
	mutex_unlock(&cnic_lock);

	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");

	mutex_unlock(&cnic_lock);

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		mutex_unlock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		mutex_unlock(&cnic_lock);

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
	mutex_unlock(&cnic_lock);

EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
	struct cnic_local *cp = dev->cnic_priv;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		pr_err("%s: device not registered to this ulp type %d\n",
		mutex_unlock(&cnic_lock);
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
	id_tbl->start = start_id;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
	kfree(id_tbl->table);
	id_tbl->table = NULL;
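/* Reserve a specific id in the bitmap-based id table; fails if the id is
 * out of range or already in use.
 */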
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
	if (id >= id_tbl->max)

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
	spin_unlock(&id_tbl->lock);

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
	spin_unlock(&id_tbl->lock);

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
	if (id >= id_tbl->max)

	clear_bit(id, id_tbl->table);

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
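/* Fill the DMA page table with the mapped page addresses.  Each 64-bit
 * address is written as two 32-bit halves; the plain variant stores them
 * high word first (big endian order), while the _le variant below stores
 * low word first.
 */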
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
		if (dma->pg_arr[i] == NULL)

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)

	cp->setup_pgtbl(dev, dma);

	cnic_free_dma(dev, dma);

static void cnic_free_context(struct cnic_dev *dev)
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;

static void __cnic_free_uio(struct cnic_uio_dev *udev)
	uio_unregister_device(&udev->cnic_uinfo);

		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;

	pci_dev_put(udev->pdev);

static void cnic_free_uio(struct cnic_uio_dev *udev)
	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);

static void cnic_free_resc(struct cnic_dev *dev)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	cnic_free_context(dev);

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);

static int cnic_alloc_context(struct cnic_dev *dev)
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)

		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

				off = BNX2_PG_CTX_MAP;
				off = BNX2_ISCSI_CTX_MAP;
			reg = cnic_reg_rd_ind(dev, off);
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;

		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
		for (i = 0; i < cp->ctx_blks; i++) {
				dma_alloc_coherent(&dev->pcidev->dev,
						   &cp->ctx_arr[i].mapping,
			if (cp->ctx_arr[i].ctx == NULL)

static u16 cnic_bnx2_next_idx(u16 idx)

static u16 cnic_bnx2_hw_idx(u16 idx)

static u16 cnic_bnx2x_next_idx(u16 idx)
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)

static u16 cnic_bnx2x_hw_idx(u16 idx)
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
	int err, i, use_page_tbl = 0;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);

	kcq = (struct kcqe **) info->dma.pg_arr;

		info->next_idx = cnic_bnx2_next_idx;
		info->hw_idx = cnic_bnx2_hw_idx;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];

		if (j >= KCQ_PAGE_CNT)

		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			read_unlock(&cnic_dev_lock);
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);

	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   GFP_KERNEL | __GFP_COMP);

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  GFP_KERNEL | __GFP_COMP);

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
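/* Describe the register window, status block and L2 ring/buffer memory as
 * UIO memory regions so the userspace driver can map them, then register
 * the UIO device with cnic_uio_open/cnic_uio_close as its handlers.
 */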
static int cnic_init_uio(struct cnic_dev *dev)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
		uinfo->name = "bnx2x_cnic";

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		ret = uio_register_device(&udev->pdev->dev, uinfo);
		cnic_init_rings(dev);

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
	struct cnic_local *cp = dev->cnic_priv;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);

	ret = cnic_alloc_context(dev);

	ret = cnic_alloc_uio_rings(dev, 2);

	ret = cnic_init_uio(dev);

	cnic_free_resc(dev);

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)

	if (blks > cp->ethdev->ctx_tbl_len)

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = ctx_blk_size;
	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
		if (cp->ctx_arr[i].ctx == NULL)

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
		if ((i % n) == (n - 1))

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);

	ret = cnic_alloc_bnx2x_context(dev);

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);

	ret = cnic_init_uio(dev);

	cnic_free_resc(dev);
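/* Free slots in the kernel work queue, computed from the producer and
 * consumer indices masked by max_kwq_idx.
 */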
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		sw_prod = prod & MAX_KWQ_IDX;
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
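/* Submit a single 16-byte kwqe (ramrod) to the bnx2x hardware through the
 * ethdev's drv_submit_kwqes_16() hook, encoding the command, HW CID,
 * connection type and PF id in the SPE header.
 */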
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		cid = cnic_alloc_new_id(&cp->cid_tbl);

	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);

	cnic_free_bnx2x_conn_resc(dev, l5_cid);
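/* Return the virtual address of the per-connection context memory for
 * this cid (optionally zeroing it when init is set) and report its DMA
 * address through *ctx_addr, honoring the block alignment computed at
 * allocation time.
 */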
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = CNIC_PORT(cp);

	if (!req2->num_additional_wqes)

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    cp->port_mode == CHIP_2_PORT_MODE) {
		ictx->xstorm_st_context.common.flags =
			1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
		ictx->xstorm_st_context.common.flags =
			port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
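/* Handle OFFLOAD_CONN for iSCSI: validate the request, allocate the
 * per-connection id and DMA resources, set up the context, and complete
 * with an ISCSI_KCQE_OPCODE_OFFLOAD_CONN kcqe carrying either success or
 * the failure status.
 */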
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);

	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
		req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
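/* Issue a CFC delete ramrod for the connection and sleep on ctx->waitq
 * until the completion sets ctx->wait_cond.
 */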
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;

	init_waitqueue_head(&ctx->waitq);
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

		wait_event(ctx->waitq, ctx->wait_cond);
	if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
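/* Fill the xstorm/tstorm connection buffers for a TCP connect request:
 * context address, MSS, receive buffer, Nagle and delayed-ack flags,
 * keep-alive parameters, and the pseudo header checksum computed with
 * csum_ipv6_magic() over the source and destination addresses.
 */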
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	tstorm_buf->max_rt_time = 0xffffffff;

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
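/* L4 connect path for bnx2x: build the active connection buffer from the
 * connect_req1/2/3 kwqes (MAC, IPv4 or IPv6 addresses, ports, pmtu),
 * program the VLAN and timestamp options, then submit the TCP_CONNECT
 * ramrod.
 */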
2042 static int cnic_bnx2x_connect(struct cnic_dev
*dev
, struct kwqe
*wqes
[],
2045 struct cnic_local
*cp
= dev
->cnic_priv
;
2046 struct l4_kwq_connect_req1
*kwqe1
=
2047 (struct l4_kwq_connect_req1
*) wqes
[0];
2048 struct l4_kwq_connect_req3
*kwqe3
;
2049 struct l5cm_active_conn_buffer
*conn_buf
;
2050 struct l5cm_conn_addr_params
*conn_addr
;
2051 union l5cm_specific_data l5_data
;
2052 u32 l5_cid
= kwqe1
->pg_cid
;
2053 struct cnic_sock
*csk
= &cp
->csk_tbl
[l5_cid
];
2054 struct cnic_context
*ctx
= &cp
->ctx_tbl
[l5_cid
];
2062 if (kwqe1
->conn_flags
& L4_KWQ_CONNECT_REQ1_IP_V6
)
2072 if (sizeof(*conn_buf
) > CNIC_KWQ16_DATA_SIZE
) {
2073 netdev_err(dev
->netdev
, "conn_buf size too big\n");
2076 conn_buf
= cnic_get_kwqe_16_data(cp
, l5_cid
, &l5_data
);
2080 memset(conn_buf
, 0, sizeof(*conn_buf
));
2082 conn_addr
= &conn_buf
->conn_addr_buf
;
2083 conn_addr
->remote_addr_0
= csk
->ha
[0];
2084 conn_addr
->remote_addr_1
= csk
->ha
[1];
2085 conn_addr
->remote_addr_2
= csk
->ha
[2];
2086 conn_addr
->remote_addr_3
= csk
->ha
[3];
2087 conn_addr
->remote_addr_4
= csk
->ha
[4];
2088 conn_addr
->remote_addr_5
= csk
->ha
[5];
2090 if (kwqe1
->conn_flags
& L4_KWQ_CONNECT_REQ1_IP_V6
) {
2091 struct l4_kwq_connect_req2
*kwqe2
=
2092 (struct l4_kwq_connect_req2
*) wqes
[1];
2094 conn_addr
->local_ip_addr
.ip_addr_hi_hi
= kwqe2
->src_ip_v6_4
;
2095 conn_addr
->local_ip_addr
.ip_addr_hi_lo
= kwqe2
->src_ip_v6_3
;
2096 conn_addr
->local_ip_addr
.ip_addr_lo_hi
= kwqe2
->src_ip_v6_2
;
2098 conn_addr
->remote_ip_addr
.ip_addr_hi_hi
= kwqe2
->dst_ip_v6_4
;
2099 conn_addr
->remote_ip_addr
.ip_addr_hi_lo
= kwqe2
->dst_ip_v6_3
;
2100 conn_addr
->remote_ip_addr
.ip_addr_lo_hi
= kwqe2
->dst_ip_v6_2
;
2101 conn_addr
->params
|= L5CM_CONN_ADDR_PARAMS_IP_VERSION
;
2103 kwqe3
= (struct l4_kwq_connect_req3
*) wqes
[*work
- 1];
2105 conn_addr
->local_ip_addr
.ip_addr_lo_lo
= kwqe1
->src_ip
;
2106 conn_addr
->remote_ip_addr
.ip_addr_lo_lo
= kwqe1
->dst_ip
;
2107 conn_addr
->local_tcp_port
= kwqe1
->src_port
;
2108 conn_addr
->remote_tcp_port
= kwqe1
->dst_port
;
2110 conn_addr
->pmtu
= kwqe3
->pmtu
;
2111 cnic_init_storm_conn_bufs(dev
, kwqe1
, kwqe3
, conn_buf
);
2113 CNIC_WR16(dev
, BAR_XSTRORM_INTMEM
+
2114 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp
->pfid
), csk
->vlan_id
);
2116 cnic_bnx2x_set_tcp_timestamp(dev
,
2117 kwqe1
->tcp_flags
& L4_KWQ_CONNECT_REQ1_TIME_STAMP
);
2119 ret
= cnic_submit_kwqe_16(dev
, L5CM_RAMROD_CMD_ID_TCP_CONNECT
,
2120 kwqe1
->cid
, ISCSI_CONNECTION_TYPE
, &l5_data
);
2122 set_bit(CTX_FL_OFFLD_START
, &ctx
->ctx_flags
);
2127 static int cnic_bnx2x_close(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
2129 struct l4_kwq_close_req
*req
= (struct l4_kwq_close_req
*) kwqe
;
2130 union l5cm_specific_data l5_data
;
2133 memset(&l5_data
, 0, sizeof(l5_data
));
2134 ret
= cnic_submit_kwqe_16(dev
, L5CM_RAMROD_CMD_ID_CLOSE
,
2135 req
->cid
, ISCSI_CONNECTION_TYPE
, &l5_data
);
2139 static int cnic_bnx2x_reset(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
2141 struct l4_kwq_reset_req
*req
= (struct l4_kwq_reset_req
*) kwqe
;
2142 union l5cm_specific_data l5_data
;
2145 memset(&l5_data
, 0, sizeof(l5_data
));
2146 ret
= cnic_submit_kwqe_16(dev
, L5CM_RAMROD_CMD_ID_ABORT
,
2147 req
->cid
, ISCSI_CONNECTION_TYPE
, &l5_data
);
2150 static int cnic_bnx2x_offload_pg(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
2152 struct l4_kwq_offload_pg
*req
= (struct l4_kwq_offload_pg
*) kwqe
;
2154 struct kcqe
*cqes
[1];
2156 memset(&kcqe
, 0, sizeof(kcqe
));
2157 kcqe
.pg_host_opaque
= req
->host_opaque
;
2158 kcqe
.pg_cid
= req
->host_opaque
;
2159 kcqe
.op_code
= L4_KCQE_OPCODE_VALUE_OFFLOAD_PG
;
2160 cqes
[0] = (struct kcqe
*) &kcqe
;
2161 cnic_reply_bnx2x_kcqes(dev
, CNIC_ULP_L4
, cqes
, 1);
2165 static int cnic_bnx2x_update_pg(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
2167 struct l4_kwq_update_pg
*req
= (struct l4_kwq_update_pg
*) kwqe
;
2169 struct kcqe
*cqes
[1];
2171 memset(&kcqe
, 0, sizeof(kcqe
));
2172 kcqe
.pg_host_opaque
= req
->pg_host_opaque
;
2173 kcqe
.pg_cid
= req
->pg_cid
;
2174 kcqe
.op_code
= L4_KCQE_OPCODE_VALUE_UPDATE_PG
;
2175 cqes
[0] = (struct kcqe
*) &kcqe
;
2176 cnic_reply_bnx2x_kcqes(dev
, CNIC_ULP_L4
, cqes
, 1);
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 2;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 3;
		return -EINVAL;
	}
	*work = 3;

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(cp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(cp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}
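
/* Error-path convention for the FCoE offload handler above: any failure
 * before the ramrod is submitted falls through to err_reply, which frees
 * the per-connection resources (if any were allocated) and synthesizes an
 * OFFLOAD_CONN KCQE carrying CTX_ALLOC_FAILURE status, so the FCoE ULP
 * always sees a completion for its request.
 */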
static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_disable size too big\n");
		return -ENOMEM;
	}
	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_disable)
		return -ENOMEM;

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_destroy *req;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (ret == 0) {
		wait_event(ctx->waitq, ctx->wait_cond);
		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(2000));
	}

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
	kcqe.fcoe_conn_id = req->conn_id;
	kcqe.fcoe_conn_context_id = cid;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}

static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_destroy *req;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_destroy *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
		return -EINVAL;

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case FCOE_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_ENABLE_CONN:
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DISABLE_CONN:
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY:
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_STAT:
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}

static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int ret = -EINVAL;
	u32 layer_code;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!num_wqes)
		return 0;

	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
	switch (layer_code) {
	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
	case KWQE_FLAGS_LAYER_MASK_L4:
	case KWQE_FLAGS_LAYER_MASK_L2:
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		break;

	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
		break;
	}
	return ret;
}
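
/* The dispatchers above key off the layer bits packed into kwqe_op_flag.
 * As an illustration (not a new code path), the L2 flush request built in
 * cnic_shutdown_bnx2_rx_ring() later in this file encodes its fields as
 *
 *	(L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
 *	(L2_KWQE_OPCODE_VALUE_FLUSH << KWQE_OPCODE_SHIFT)
 *
 * and KWQE_OPCODE() plus the KWQE_LAYER_MASK test simply undo that
 * packing when the WQE is routed here.
 */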
static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}

static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
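
/* service_kcqes() delivers completions in batches: j counts how many
 * consecutive entries of completed_kcq[] share the same layer mask, so a
 * single indicate_kcqes() upcall can hand the whole run to one ULP.
 * Ramrod completions are counted separately and returned to the bnx2x
 * driver as SPQ credit once the batch has been processed.
 */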
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = info->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = info->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}
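
/* Ring indexing sketch for cnic_get_kcqes(): the KCQ is a set of pages,
 * and ri is the running index masked to MAX_KCQ_IDX; KCQ_PG(ri) selects
 * the page and KCQ_IDX(ri) the entry within it.  Entries flagged with
 * KCQE_FLAGS_NEXT continue into the following entry, so sw_prod_idx is
 * only advanced to the last index that completed a full KCQE group
 * (tracked by last/last_cnt above).
 */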
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp = 1;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}

static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading other fields */
	rmb();
	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();

		status_idx = (u16) *cp->kcq1.status_idx_ptr;
		/* status block index must be read first */
		rmb();
		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}
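
/* The rmb()/barrier() pairing above implements the usual status-block
 * protocol: the status index is sampled first, the queue is drained, and
 * the index is re-read after each pass.  The loop only exits once a drain
 * pass finds no new KCQEs, at which point the final status_idx is a safe
 * value for the caller to use in its interrupt ack.
 */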
static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}

static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading the KCQ */
	rmb();
	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();

		last_status = *info->status_idx_ptr;
		/* status block index must be read before reading the KCQ */
		rmb();
	}
	return last_status;
}

static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx, new_status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	while (1) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

		CNIC_WR16(dev, cp->kcq1.io_addr,
			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
					   status_idx, IGU_INT_ENABLE, 1);
			break;
		}

		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		if (new_status_idx != status_idx)
			continue;

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);

		break;
	}
}
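
/* On E2 and later chips both kcq1 and kcq2 hang off the same IGU status
 * block, so the bottom half above re-reads the status index after
 * servicing kcq2 and restarts the whole pass if it moved; only a stable
 * index is acked with IGU_INT_ENABLE.
 */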
static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}

static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
{
	struct cnic_ulp_ops *ulp_ops;

	if (if_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops) {
		mutex_unlock(&cnic_lock);
		return;
	}
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
		cnic_ulp_stop_one(cp, if_type);
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						    lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_STOP_ISCSI_CMD: {
		struct cnic_local *cp = dev->cnic_priv;
		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
		break;
	}
	case CNIC_CTL_COMPLETION_CMD: {
		struct cnic_ctl_completion *comp = &info->data.comp;
		u32 cid = BNX2X_SW_CID(comp->cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			if (unlikely(comp->error)) {
				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
				netdev_err(dev->netdev,
					   "CID %x CFC delete comp error %x\n",
					   cid, comp->error);
			}

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}
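
/* ULP_F_CALL_PENDING protocol used by the start/stop helpers above: the
 * flag is set while cnic_lock is held, the mutex is dropped before the
 * call into the ULP (which may sleep), and the flag is cleared when the
 * call returns.  Unregistration paths can therefore synchronize on the
 * flag instead of holding the mutex across a driver callback.
 */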
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
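
/* KWQE layout produced by cnic_cm_conn_req(): an IPv4 connect is two
 * WQEs while IPv6 inserts CONNECT2 in the middle to carry the upper 96
 * bits of both addresses:
 *
 *	IPv4:  wqes[] = { CONNECT1, CONNECT3 }             num_wqes = 2
 *	IPv6:  wqes[] = { CONNECT1, CONNECT2, CONNECT3 }   num_wqes = 3
 *
 * The MSS is derived from the path MTU minus the v4 or v6 header sizes
 * accordingly.
 */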
static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}
static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;

	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
	if (!IS_ERR(rt)) {
		*dst = &rt->dst;
		return 0;
	}
	return PTR_ERR(rt);
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}
*csk
, struct cnic_sockaddr
*saddr
)
3505 struct cnic_dev
*dev
= csk
->dev
;
3506 struct cnic_local
*cp
= dev
->cnic_priv
;
3508 return cnic_send_nlmsg(cp
, ISCSI_KEVENT_PATH_REQ
, csk
);
3511 static int cnic_get_route(struct cnic_sock
*csk
, struct cnic_sockaddr
*saddr
)
3513 struct cnic_dev
*dev
= csk
->dev
;
3514 struct cnic_local
*cp
= dev
->cnic_priv
;
3516 struct dst_entry
*dst
= NULL
;
3517 struct net_device
*realdev
;
3521 if (saddr
->local
.v6
.sin6_family
== AF_INET6
&&
3522 saddr
->remote
.v6
.sin6_family
== AF_INET6
)
3524 else if (saddr
->local
.v4
.sin_family
== AF_INET
&&
3525 saddr
->remote
.v4
.sin_family
== AF_INET
)
3530 clear_bit(SK_F_IPV6
, &csk
->flags
);
3533 set_bit(SK_F_IPV6
, &csk
->flags
);
3534 cnic_get_v6_route(&saddr
->remote
.v6
, &dst
);
3536 memcpy(&csk
->dst_ip
[0], &saddr
->remote
.v6
.sin6_addr
,
3537 sizeof(struct in6_addr
));
3538 csk
->dst_port
= saddr
->remote
.v6
.sin6_port
;
3539 local_port
= saddr
->local
.v6
.sin6_port
;
3542 cnic_get_v4_route(&saddr
->remote
.v4
, &dst
);
3544 csk
->dst_ip
[0] = saddr
->remote
.v4
.sin_addr
.s_addr
;
3545 csk
->dst_port
= saddr
->remote
.v4
.sin_port
;
3546 local_port
= saddr
->local
.v4
.sin_port
;
3550 csk
->mtu
= dev
->netdev
->mtu
;
3551 if (dst
&& dst
->dev
) {
3552 u16 vlan
= cnic_get_vlan(dst
->dev
, &realdev
);
3553 if (realdev
== dev
->netdev
) {
3554 csk
->vlan_id
= vlan
;
3555 csk
->mtu
= dst_mtu(dst
);
3559 port_id
= be16_to_cpu(local_port
);
3560 if (port_id
>= CNIC_LOCAL_PORT_MIN
&&
3561 port_id
< CNIC_LOCAL_PORT_MAX
) {
3562 if (cnic_alloc_id(&cp
->csk_port_tbl
, port_id
))
3568 port_id
= cnic_alloc_new_id(&cp
->csk_port_tbl
);
3569 if (port_id
== -1) {
3573 local_port
= cpu_to_be16(port_id
);
3575 csk
->src_port
= local_port
;
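
/* Source-port selection in cnic_get_route(): a caller-supplied port in
 * [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX) is reserved directly in
 * csk_port_tbl; anything else falls back to cnic_alloc_new_id(), whose
 * search is seeded by the randomized starting id chosen in
 * cnic_cm_alloc_mem(), so offloaded connections avoid clustering on the
 * same local ports.
 */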
static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	int err = 0;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return -EOPNOTSUPP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode)
		return -EALREADY;

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	}
	return -EALREADY;
}

static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	ctx->timestamp = jiffies;
	ctx->wait_cond = 1;
	wake_up(&ctx->waitq);
}

static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
		cnic_process_fcoe_term_conn(dev, kcqe);
		return;
	}
	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		/* after we already sent CLOSE_REQ */
		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
		else
			cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}
static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 port_id;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	get_random_bytes(&port_id, sizeof(port_id));
	port_id %= CNIC_LOCAL_PORT_RANGE;
	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN, port_id)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
	 *    event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}
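
/* State acceptance used by the two close paths above, via
 * cnic_ready_to_close(): an unsolicited RESET on a fully offloaded socket
 * forces csk->state to RESET_RECEIVED and is accepted; a state of
 * CLOSE_COMP or RESET_COMP accepts any terminal opcode; and state 0
 * (never connected) accepts the opcode supplied by cm_abort.  The
 * SK_F_CLOSING test-and-set guarantees the teardown sequence runs at
 * most once per connection even if duplicate events are delivered.
 */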
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int j;

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		for (j = 0; j < 5; j++) {
			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
				break;
			msleep(20);
		}

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}

static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
		struct drv_ctl_info info;

		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);

		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
		cp->ethdev->drv_ctl(dev->netdev, &info);
	}

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int err;

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		err = cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (!err) {
			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
				atomic_dec(&cp->iscsi_conn);

			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
		}
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));

}
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}
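
/* Hypothetical ULP usage of the ops wired up in cnic_cm_open() above
 * (sketch only; error handling and the ULP's own context are omitted):
 *
 *	struct cnic_sock *csk;
 *
 *	if (!dev->cm_create(dev, CNIC_ULP_ISCSI, cid, l5_cid, &csk, ctx) &&
 *	    !dev->cm_connect(csk, &saddr))
 *		;	// wait for the cm_connect_complete() upcall
 *
 * Completions come back through the cm_ulp_ops callbacks registered on
 * CNIC_ULP_L4 rather than as return values.
 */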
static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}

static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
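
/* The 5709 context setup above is a doorbell handshake: the driver posts
 * each page-table entry with WRITE_REQ set in BNX2_CTX_HOST_PAGE_TBL_CTRL
 * and then polls up to 10 times for the chip to clear the bit.  A bit
 * that never clears means the entry was not accepted and the setup fails
 * with -EBUSY.
 */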
static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}

static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = udev->l2_ring + BCM_PAGE_SIZE;
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}
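
/* UIO L2 ring layout implied by the two ring-init functions above: page 0
 * of udev->l2_ring holds the TX BDs and page 1 the RX BDs, each ring
 * terminated by a "next" BD that points back at its own page base.  (On
 * bnx2x the page at offset 2 * BCM_PAGE_SIZE is used as the RCQ instead,
 * as seen in cnic_l2_completion() earlier in this file.)
 */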
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8)  << 4;
	else
		val |= (BCM_PAGE_BITS - 8)  << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		(u16 *) &sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			(u16 *) &msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
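
/* Publish the host context blocks to the chip by writing each block's
 * DMA address into the context table starting at ethdev->ctx_tbl_offset.
 * Addresses are rounded up to the cp->ctx_align boundary first; the
 * allocation (done elsewhere) is expected to leave room for this
 * rounding.
 */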
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}
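
/* bnx2x events are serviced in a tasklet (bottom half); a dedicated
 * IRQ is requested only when the ethernet driver hands us an MSI-X
 * vector via CNIC_DRV_STATE_USING_MSIX.
 */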
static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}
static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}
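
/* Enable the iSCSI event queue interrupt: program a coalescing timeout
 * for the HC_INDEX_ISCSI_EQ_CONS index and clear its "host coalescing
 * disabled" flag via cnic_storm_memset_hc_disable().
 */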
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}
static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
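
/* Set up the iSCSI L2 TX ring in the first page of udev->l2_ring and
 * fill in the TX portion of the client-init ramrod data.  Each packet
 * occupies three BDs (start BD, middle BD - presumably the parse BD on
 * this firmware generation - and regular BD), so the ring is walked in
 * steps of three; the middle BD is left zeroed here.
 */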
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		data->general.statistics_zero_flg = 1;
		data->general.statistics_en_flg = 1;
		data->general.statistics_counter_id = cli;
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}
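
/* Set up the iSCSI L2 RX BD ring (second page of udev->l2_ring) and
 * the RX completion queue ring (third page), and fill in the RX
 * portion of the ramrod data.  The next-page pointer of each ring is
 * aimed back at its own page, making both rings circular.
 */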
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;
	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->general.func_id = cp->pfid;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}
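
/* Point kcq1 (iSCSI) and, on E2 and later chips, kcq2 (FCoE) at their
 * event queue producer registers in internal memory and at the
 * consumer/status indices inside the host status block.
 */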
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}
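
/* bnx2x bring-up: derive the PF id (which depends on 2- vs 4-port mode
 * on E2+ parts), create the iSCSI and FCoE CID tables, and program the
 * iSCSI event queue page addresses into CSTORM internal memory.
 */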
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret;
	u32 pfid;

	cp->port_mode = CHIP_PORT_MODE_NONE;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);

		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val) {
			cp->port_mode = CHIP_4_PORT_MODE;
			cp->pfid = func >> 1;
		} else {
			cp->port_mode = CHIP_2_PORT_MODE;
			cp->pfid = func & 0x6;
		}
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
					BNX2X_FCOE_NUM_CONNECTIONS,
					cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}
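
/* Bring the L2 rings into service.  On bnx2 this is local ring setup
 * only.  On bnx2x it is a handshake with the firmware: write the
 * initial RX producers into USTORM, post a CLIENT_SETUP ramrod whose
 * l5_data carries the DMA address of the ramrod data block, then poll
 * for the completion handler to clear CNIC_LCL_FL_L2_WAIT.
 */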
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
	}
}
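
/* Tear-down mirrors cnic_init_rings(): disable the ring doorbell, post
 * an ETH_HALT ramrod and wait for it to complete, then post a common
 * CFC_DEL ramrod to release the connection context.
 */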
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}
static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}
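
/* Undo cnic_start_bnx2_hw(): mask the interrupt, clear the CP and COM
 * scratchpad doorbells so those processors stop running the kernel
 * queues, and free the contexts, IRQ, and DMA resources.
 */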
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}
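
/* Recognize a CNIC-capable device purely by its ethtool driver name
 * ("bnx2" or "bnx2x"); on success the new cnic_dev is added to the
 * global device list under cnic_dev_lock.
 */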
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}
/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}
static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}
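
/* Module init.  All device discovery is driven by the netdevice
 * notifier: registering it replays register/up events for interfaces
 * that already exist, so no explicit scan is needed here.
 */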
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);