/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
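
/* UIO open/release handlers.  Userspace opens the cnic UIO device to get
 * at the L2 rings; open is gated on CAP_NET_ADMIN, a single opener, and
 * the device being up, and it brings the rings up.  Release tears the
 * rings back down. */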
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	cp->uio_dev = iminor(inode);

	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cnic_shutdown_rings(dev);

	cp->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
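
/* Look up the cnic_dev bound to a netdev under the device list read lock;
 * on a match the device is returned with an extra reference held via
 * cnic_hold(), which the caller must drop with cnic_put(). */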
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
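
/* The helpers below tunnel register, context-memory, and L2-ring accesses
 * through the ethernet driver's drv_ctl() callback, since cnic does not
 * own the device's PCI resources directly. */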
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);

	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
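
/* Send an iSCSI netlink message (a path request for a specific socket, or
 * an if-down event when csk is NULL) to userspace through the registered
 * iSCSI ULP's iscsi_nl_send_msg() callback. */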
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();

	return 0;
}

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (cp->cnic_uinfo)
			cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
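
/* ULP (upper layer protocol, e.g. the bnx2i iSCSI driver) registration
 * API.  A ULP type may be registered once globally; registration also
 * calls the ULP's cnic_init() for every existing cnic device. */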
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
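
/* Simple bitmap-based ID allocator used for hardware connection IDs.  The
 * table tracks IDs relative to start_id; cnic_alloc_new_id() hands them
 * out round-robin to avoid quick reuse of a just-freed ID. */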

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
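
/* Two page-table layouts: the bnx2 chips consume entries in big endian
 * order (high dword first), while the bnx2x path uses the little endian
 * variant below.  cp->setup_pgtbl is set to the right one per chip. */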
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size,
				  cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size,
				  cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->l2_ring_size = pages * BCM_PAGE_SIZE;
	cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size,
					 &cp->l2_ring_map,
					 GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_ring)
		return -ENOMEM;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size,
					&cp->l2_buf_map,
					GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_buf)
		return -ENOMEM;

	return 0;
}
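
/* The UIO device exposes four memory regions to the userspace client:
 * mem[0] the register BAR, mem[1] the status block, mem[2] the L2 rings,
 * and mem[3] the L2 buffers. */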
static int cnic_alloc_uio(struct cnic_dev *dev) {
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		return -ENOMEM;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
				     PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		return ret;
	}

	cp->cnic_uinfo = uinfo;
	return 0;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_l2_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (BNX2X_CHIP_IS_E1H(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_l2_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
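
/* bnx2 KWQE submission: copy the caller's work queue entries into the
 * kernel work queue pages and ring the doorbell with the new producer
 * index.  Fails with -EAGAIN if the device is down or the queue lacks
 * room. */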
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
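
/* ISCSI_KWQE_OPCODE_INIT1 handler: derive the task array, R2T queue and
 * HQ sizes from the request, then program the per-function iSCSI
 * parameters into each storm processor's internal RAM. */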
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
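
/* Build the per-connection iSCSI context (the xstorm, tstorm, ustorm and
 * cstorm sections plus the timers block) from the three offload KWQEs. */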
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE,
				  &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid, type;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
		& SPE_HDR_CONN_TYPE;
	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, type, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
		msleep(250);

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0)
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);
		i += work;
	}
	return 0;
}
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = cp->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}

static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(cp->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp = 1;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}
2223 static void cnic_chk_pkt_rings(struct cnic_local
*cp
)
2225 u16 rx_cons
, tx_cons
;
2228 if (!test_bit(CNIC_LCL_FL_RINGS_INITED
, &cp
->cnic_local_flags
))
2231 rx_cons
= *cp
->rx_cons_ptr
;
2232 tx_cons
= *cp
->tx_cons_ptr
;
2233 if (cp
->tx_cons
!= tx_cons
|| cp
->rx_cons
!= rx_cons
) {
2234 if (test_bit(CNIC_LCL_FL_L2_WAIT
, &cp
->cnic_local_flags
))
2235 comp
= cnic_l2_completion(cp
);
2237 cp
->tx_cons
= tx_cons
;
2238 cp
->rx_cons
= rx_cons
;
2240 uio_event_notify(cp
->cnic_uinfo
);
2243 clear_bit(CNIC_LCL_FL_L2_WAIT
, &cp
->cnic_local_flags
);
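
/*
 * The uio_event_notify() above is the kick that wakes the userspace
 * consumer of the cnic UIO device whenever the L2 tx/rx consumer
 * indexes move.  The CNIC_LCL_FL_L2_WAIT bit doubles as a handshake:
 * cnic_init_rings()/cnic_shutdown_rings() set it before posting a
 * CLIENT_SETUP/HALT ramrod on bnx2x, and this path clears it once
 * cnic_l2_completion() sees the corresponding ramrod CQE.
 */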

static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != *cp->kcq1.status_idx_ptr) {
			status_idx = (u16) *cp->kcq1.status_idx_ptr;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
		} else
			break;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = *cp->kcq1.status_idx_ptr;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return status_idx;

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}

static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}
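
/*
 * Layout of the ack written above, as encoded by the shifts: the
 * igu_ack_register pairs the status block index with a packed field
 * holding the status block id, storm id, update-index flag and
 * interrupt mode, and the whole structure is pushed to the HC command
 * register in a single 32-bit store.
 */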

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();
		if (last_status == *info->status_idx_ptr)
			break;

		last_status = *info->status_idx_ptr;
	}
	return last_status;
}

static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
			   status_idx, IGU_INT_ENABLE, 1);
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_COMPLETION_CMD: {
		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}

static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}
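
/*
 * The OFFLOAD_PG KWQE built above seeds the PG context, in effect the
 * L2 path for an offloaded connection: destination and source MACs,
 * EtherType, starting IP id and, when a VLAN is configured, a VLAN
 * tag that also grows l2hdr_nbytes by 4 bytes.  host_opaque carries
 * the l5_cid so the completion can be matched back to its cnic_sock
 * in cnic_cm_process_offld_pg().
 */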

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
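
/*
 * Connect requests go out as a KWQE chain: CONNECT1 always leads,
 * CONNECT2 is inserted only for IPv6 (it carries the remaining 96
 * bits of the source and destination addresses; word 0 travels in
 * CONNECT1), and CONNECT3 ends the chain with keepalive and TCP
 * parameters.  The MSS handed to the chip is simply the path MTU
 * minus the fixed IPv4/IPv6 and TCP header sizes, and the per-socket
 * TCP options are folded into tcp_flags above.
 */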

static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct flowi fl;
	int err;
	struct rtable *rt;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->dst;
	return err;
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	u32 local_port;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	if (local_port >= CNIC_LOCAL_PORT_MIN &&
	    local_port < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
			local_port = 0;
	} else
		local_port = 0;

	if (!local_port) {
		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (local_port == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}
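
/*
 * Source port selection above: a caller-supplied port inside
 * [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX) is reserved directly in
 * csk_port_tbl; any other value (or a failed reservation) falls back
 * to cnic_alloc_new_id() on the same id table.  The id is released by
 * cnic_cm_cleanup() when the socket is torn down.
 */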

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	int err = 0;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode)
		return -EALREADY;

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	} else {
		return -EALREADY;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}

	return 0;
}

static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP, we accept any event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}
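
/*
 * bnx2x connection teardown above is a small ramrod-driven state
 * machine: a close/reset completion first posts SEARCHER_DELETE when
 * the PG offload had completed, the SEARCHER_DELETE completion posts
 * TERMINATE_OFFLOAD, and only the final completion (or the short path
 * with no searcher entry) timestamps the context, releases the
 * connection and signals the upper layer.
 */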

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}

static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}

static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}

static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}

static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) cp->l2_ring;

	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) cp->l2_ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}
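
/*
 * Shape of the bnx2 L2 tx ring built above: every BD points at the
 * same dummy buffer (cp->l2_buf_map), and the final slot of the page
 * is rewritten as the chain pointer back to the ring base (the
 * TBDR_BHADDR context words), so the ring wraps in place.
 */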

static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8)  << 4;
	else
		val |= (BCM_PAGE_BITS - 8)  << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		(u16 *) &sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			(u16 *) &msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 12);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
	dma_addr_t buf_map, ring_map = cp->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int port = CNIC_PORT(cp);
	int i;
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_XSTRORM_INTMEM +
		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(cp->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	int port = CNIC_PORT(cp);
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = cp->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
	data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);

	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->rx.outer_vlan_removal_enable_flg = 1;

	/* reset tstorm and ustorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_TSTRORM_INTMEM +
		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);

		val = BAR_USTRORM_INTMEM +
		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
}

static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, addr, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	if (base == 0)
		return;

	addr = BNX2X_SHMEM_ADDR(base,
			dev_info.port_hw_config[port].iscsi_mac_upper);

	val = CNIC_RD(dev, addr);

	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base,
			dev_info.port_hw_config[port].iscsi_mac_lower);

	val = CNIC_RD(dev, addr);

	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);
		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}
	if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
		int func = CNIC_FUNC(cp);
		u32 mf_cfg_addr;

		mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;

		addr = mf_cfg_addr +
			offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);

		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			addr = mf_cfg_addr +
				offsetof(struct mf_cfg,
					 func_mf_config[func].config);
			val = CNIC_RD(dev, addr);
			val &= FUNC_MF_CFG_PROTOCOL_MASK;
			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
				dev->max_iscsi_conn = 0;
		}
	}
}

static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret, i;
	u32 pfid;
	struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

	cp->pfid = func;
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid);

	if (ret)
		return -ENOMEM;

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			  CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	cp->kcq1.hw_prod_idx_ptr =
		&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
	cp->kcq1.status_idx_ptr =
		&sb->sb.running_index[SM_RX_ID];

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}

static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		u32 cl_qzone_id, type;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli);

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = cp->l2_buf;
		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = cp->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) cp->l2_buf_map >> 32;

		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			BNX2X_ISCSI_L2_CID, type, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
	}
}
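
/*
 * Ring bring-up handshake, as implemented above for bnx2x: zero the
 * rx producer block, set CNIC_LCL_FL_L2_WAIT, post CLIENT_SETUP with
 * the DMA address of the ramrod data buffer, then poll (roughly
 * 10 ms) for cnic_chk_pkt_rings()/cnic_l2_completion() to spot the
 * ramrod CQE and clear the wait flag, and finally return the SPQ
 * credit and start the ring via cnic_ring_ctl().
 */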

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		union l5cm_specific_data l5_data;
		int i;
		u32 type;

		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			BNX2X_ISCSI_L2_CID, type, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}
4321 static int cnic_register_netdev(struct cnic_dev
*dev
)
4323 struct cnic_local
*cp
= dev
->cnic_priv
;
4324 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
4330 if (ethdev
->drv_state
& CNIC_DRV_STATE_REGD
)
4333 err
= ethdev
->drv_register_cnic(dev
->netdev
, cp
->cnic_ops
, dev
);
4335 netdev_err(dev
->netdev
, "register_cnic failed\n");
static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	cp->chip_id = ethdev->chip_id;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}
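/* Chip-specific stop routine for bnx2x devices: quiesce the iSCSI
 * event queue producer/consumer state before freeing resources.
 */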
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}
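/* Common stop path.  Waits briefly for any userspace uio client to
 * close (which shuts the rings down) before clearing CNIC_F_CNIC_UP
 * and invoking the chip-specific stop routine.
 */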
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}
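/* Final teardown: wait for the reference count to drain, then release
 * the netdev reference and free the device structure.
 */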
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev,
			   "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}
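/* Allocate a cnic_dev together with its cnic_local private area in a
 * single allocation and fill in the defaults common to both chip
 * families.
 */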
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->uio_dev = -1;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;

	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}
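/* Same as init_bnx2_cnic() but for the bnx2x driver and its
 * chip-specific operations.
 */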
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;

	return cdev;
}
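/* Decide whether a net device belongs to a CNIC-capable driver by
 * checking its ethtool driver name; if so, create the CNIC device and
 * add it to the global list.
 */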
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}
/**
 * netdev event handler - creates, starts, stops, and frees CNIC
 * devices in response to NETDEV_* notifications on the underlying
 * bnx2/bnx2x net device, and forwards each event to the registered
 * upper-layer protocol (ULP) drivers.
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call	= cnic_netdev_event
};
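/* Stop and free every device on cnic_dev_list; used on module unload
 * and on notifier registration failure during init.
 */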
static void cnic_release(void)
{
	struct cnic_dev *dev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
}
static int __init cnic_init(void)
{
	int rc;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	return 0;
}
static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
}

module_init(cnic_init);
module_exit(cnic_exit);