X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=drivers%2Fnet%2Fcnic.c;h=263a2944566f6948f759f029a875d6f4eb01e3ce;hb=79ae79c9aa496309ecc96a2c00b5427735dd3370;hp=1240deaf24206eb6ffc3e29983dddb803632b499;hpb=eaaa6e9c222d5c398488ed4216f0fd94e4b81759;p=deliverable%2Flinux.git

diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 1240deaf2420..263a2944566f 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -322,6 +322,8 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 	return 0;
 }
 
+static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
+
 static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 				  char *buf, u16 len)
 {
@@ -351,7 +353,9 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 		}
 		csk = &cp->csk_tbl[l5_cid];
 		csk_hold(csk);
-		if (cnic_in_use(csk)) {
+		if (cnic_in_use(csk) &&
+		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
+
 			memcpy(csk->ha, path_resp->mac_addr, 6);
 			if (test_bit(SK_F_IPV6, &csk->flags))
 				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
@@ -359,8 +363,16 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 			else
 				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
 				       sizeof(struct in_addr));
-			if (is_valid_ether_addr(csk->ha))
+
+			if (is_valid_ether_addr(csk->ha)) {
 				cnic_cm_set_pg(csk);
+			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
+				   !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+
+				cnic_cm_upcall(cp, csk,
+					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+				clear_bit(SK_F_CONNECT_START, &csk->flags);
+			}
 		}
 		csk_put(csk);
 		rcu_read_unlock();
@@ -414,19 +426,6 @@ static int cnic_abort_prep(struct cnic_sock *csk)
 	return 0;
 }
 
-static void cnic_uio_stop(void)
-{
-	struct cnic_dev *dev;
-
-	read_lock(&cnic_dev_lock);
-	list_for_each_entry(dev, &cnic_dev_list, list) {
-		struct cnic_local *cp = dev->cnic_priv;
-
-		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
-	}
-	read_unlock(&cnic_dev_lock);
-}
-
 int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 {
 	struct cnic_dev *dev;
@@ -498,9 +497,6 @@ int cnic_unregister_driver(int ulp_type)
 	}
 	read_unlock(&cnic_dev_lock);
 
-	if (ulp_type == CNIC_ULP_ISCSI)
-		cnic_uio_stop();
-
 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
 
 	mutex_unlock(&cnic_lock);
@@ -584,6 +580,9 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 	}
 	mutex_unlock(&cnic_lock);
 
+	if (ulp_type == CNIC_ULP_ISCSI)
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
 	synchronize_rcu();
 
 	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
@@ -831,12 +830,14 @@ static void cnic_free_resc(struct cnic_dev *dev)
 	cnic_free_dma(dev, &cp->conn_buf_info);
 	cnic_free_dma(dev, &cp->kwq_info);
 	cnic_free_dma(dev, &cp->kwq_16_data_info);
+	cnic_free_dma(dev, &cp->kcq2.dma);
 	cnic_free_dma(dev, &cp->kcq1.dma);
 	kfree(cp->iscsi_tbl);
 	cp->iscsi_tbl = NULL;
 	kfree(cp->ctx_tbl);
 	cp->ctx_tbl = NULL;
 
+	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 	cnic_free_id_tbl(&cp->cid_tbl);
 }
 
@@ -950,7 +951,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 					   &udev->l2_ring_map,
 					   GFP_KERNEL | __GFP_COMP);
 	if (!udev->l2_ring)
-		return -ENOMEM;
+		goto err_udev;
 
 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
 	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
@@ -958,7 +959,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 					  &udev->l2_buf_map,
 					  GFP_KERNEL | __GFP_COMP);
 	if (!udev->l2_buf)
-		return -ENOMEM;
+		goto err_dma;
 
 	write_lock(&cnic_dev_lock);
 	list_add(&udev->list, &cnic_udev_list);
@@ -969,6 +970,12 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 	cp->udev = udev;
 
 	return 0;
+ err_dma:
+	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+			  udev->l2_ring, udev->l2_ring_map);
+ err_udev:
+	kfree(udev);
+	return -ENOMEM;
 }
 
 static int cnic_init_uio(struct cnic_dev *dev)
@@ -1124,12 +1131,22 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 
 	cp->iro_arr = ethdev->iro_arr;
 
-	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
 	cp->iscsi_start_cid = start_cid;
+	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
+		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
+		if (!cp->fcoe_init_cid)
+			cp->fcoe_init_cid = 0x10;
+	}
+
 	if (start_cid < BNX2X_ISCSI_START_CID) {
 		u32 delta = BNX2X_ISCSI_START_CID - start_cid;
 
 		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
+		cp->fcoe_start_cid += delta;
 		cp->max_cid_space += delta;
 	}
 
@@ -1148,6 +1165,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
 	}
 
+	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
+		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
+
 	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
 		PAGE_SIZE;
 
@@ -1171,6 +1191,12 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	if (ret)
 		goto error;
 
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		ret = cnic_alloc_kcq(dev, &cp->kcq2);
+		if (ret)
+			goto error;
+	}
+
 	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
 			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
@@ -1264,12 +1290,18 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
 	struct cnic_local *cp = dev->cnic_priv;
 	struct l5cm_spe kwqe;
 	struct kwqe_16 *kwq[1];
+	u16 type_16;
 	int ret;
 
 	kwqe.hdr.conn_and_cmd_data =
 		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
 			     BNX2X_HW_CID(cp, cid)));
-	kwqe.hdr.type = cpu_to_le16(type);
+
+	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+		   SPE_HDR_FUNCTION_ID;
+
+	kwqe.hdr.type = cpu_to_le16(type_16);
 	kwqe.hdr.reserved1 = 0;
 	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
 	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
@@ -1435,8 +1467,11 @@ static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 		cnic_free_dma(dev, &iscsi->hq_info);
 		cnic_free_dma(dev, &iscsi->r2tq_info);
 		cnic_free_dma(dev, &iscsi->task_array_info);
+		cnic_free_id(&cp->cid_tbl, ctx->cid);
+	} else {
+		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
 	}
-	cnic_free_id(&cp->cid_tbl, ctx->cid);
+
 	ctx->cid = 0;
 }
 
@@ -1448,6 +1483,16 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
 
+	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
+		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
+		if (cid == -1) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		ctx->cid = cid;
+		return 0;
+	}
+
 	cid = cnic_alloc_new_id(&cp->cid_tbl);
 	if (cid == -1) {
 		ret = -ENOMEM;
@@ -1780,19 +1825,15 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 	union l5cm_specific_data l5_data;
 	int ret;
-	u32 hw_cid, type;
+	u32 hw_cid;
 
 	init_waitqueue_head(&ctx->waitq);
 	ctx->wait_cond = 0;
 	memset(&l5_data, 0, sizeof(l5_data));
 	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
-	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
-		& SPE_HDR_CONN_TYPE;
-	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
-		 SPE_HDR_FUNCTION_ID);
 
 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
-				  hw_cid, type, &l5_data);
+				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
 
 	if (ret == 0)
 		wait_event(ctx->waitq, ctx->wait_cond);
@@ -2088,8 +2129,306 @@ static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
 	return 0;
 }
 
-static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
-				   u32 num_wqes)
+static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_stat *req;
+	struct fcoe_stat_ramrod_params *fcoe_stat;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+	u32 cid;
+
+	req = (struct fcoe_kwqe_stat *) kwqe;
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_stat)
+		return -ENOMEM;
+
+	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
+	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
+
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 cid;
+	struct fcoe_init_ramrod_params *fcoe_init;
+	struct fcoe_kwqe_init1 *req1;
+	struct fcoe_kwqe_init2 *req2;
+	struct fcoe_kwqe_init3 *req3;
+	union l5cm_specific_data l5_data;
+
+	if (num < 3) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
+	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
+		*work = 1;
+		return -EINVAL;
+	}
+	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
+		*work = 2;
+		return -EINVAL;
+	}
+
+	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_init size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_init)
+		return -ENOMEM;
+
+	memset(fcoe_init, 0, sizeof(*fcoe_init));
+	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
+	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
+	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
+	fcoe_init->eq_next_page_addr.lo =
+		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
+	fcoe_init->eq_next_page_addr.hi =
+		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
+
+	fcoe_init->sb_num = cp->status_blk_num;
+	fcoe_init->eq_prod = MAX_KCQ_IDX;
+	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
+	cp->kcq2.sw_prod_idx = 0;
+
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	*work = 3;
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret = 0;
+	u32 cid = -1, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kwqe_conn_offload1 *req1;
+	struct fcoe_kwqe_conn_offload2 *req2;
+	struct fcoe_kwqe_conn_offload3 *req3;
+	struct fcoe_kwqe_conn_offload4 *req4;
+	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
+	struct cnic_context *ctx;
+	struct fcoe_context *fctx;
+	struct regpair ctx_addr;
+	union l5cm_specific_data l5_data;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	if (num < 4) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
+	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
+
+	*work = 4;
+
+	l5_cid = req1->fcoe_conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		goto err_reply;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+		goto err_reply;
+
+	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+	if (ret) {
+		ret = 0;
+		goto err_reply;
+	}
+	cid = ctx->cid;
+
+	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
+	if (fctx) {
+		u32 hw_cid = BNX2X_HW_CID(cp, cid);
+		u32 val;
+
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->xstorm_ag_context.cdu_reserved = val;
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->ustorm_ag_context.cdu_usage = val;
+	}
+	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_offload size too big\n");
+		goto err_reply;
+	}
+	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_offload)
+		goto err_reply;
+
+	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
+	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
+	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
+
+	cid = BNX2X_HW_CID(cp, cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (!ret)
+		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+	return ret;
+
+err_reply:
+	if (cid != -1)
+		cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
+	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
+	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_enable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_enable)
+		return -ENOMEM;
+
+	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
+	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_disable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_disable)
+		return -ENOMEM;
+
+	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
+	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_destroy *req;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+
+	init_waitqueue_head(&ctx->waitq);
+	ctx->wait_cond = 0;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (ret == 0) {
+		wait_event(ctx->waitq, ctx->wait_cond);
+		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+		queue_delayed_work(cnic_wq, &cp->delete_task,
+				   msecs_to_jiffies(2000));
+	}
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
+	kcqe.fcoe_conn_id = req->conn_id;
+	kcqe.fcoe_conn_context_id = cid;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_destroy *req;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+	u32 cid;
+
+	req = (struct fcoe_kwqe_destroy *) kwqe;
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
+					 struct kwqe *wqes[], u32 num_wqes)
 {
 	int i, work, ret;
 	u32 opcode;
@@ -2153,6 +2492,98 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
 	return 0;
 }
 
+static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
+					struct kwqe *wqes[], u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, work, ret;
+	u32 opcode;
+	struct kwqe *kwqe;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
+		return -EINVAL;
+
+	for (i = 0; i < num_wqes; ) {
+		kwqe = wqes[i];
+		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+		work = 1;
+
+		switch (opcode) {
+		case FCOE_KWQE_OPCODE_INIT1:
+			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
+			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_ENABLE_CONN:
+			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DISABLE_CONN:
+			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY_CONN:
+			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY:
+			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_STAT:
+			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
+			break;
+		default:
+			ret = 0;
+			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+				   opcode);
+			break;
+		}
+		if (ret < 0)
+			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+				   opcode);
+		i += work;
+	}
+	return 0;
+}
+
+static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num_wqes)
+{
+	int ret = -EINVAL;
+	u32 layer_code;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	if (!num_wqes)
+		return 0;
+
+	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
+	switch (layer_code) {
+	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
+	case KWQE_FLAGS_LAYER_MASK_L4:
+	case KWQE_FLAGS_LAYER_MASK_L2:
+		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
+		break;
+
+	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
+		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
+		break;
+	}
+	return ret;
+}
+
+static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
+{
+	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
+		return KCQE_FLAGS_LAYER_MASK_L4;
+
+	return opflag & KCQE_FLAGS_LAYER_MASK;
+}
+
 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -2164,7 +2595,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 		struct cnic_ulp_ops *ulp_ops;
 		int ulp_type;
 		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
-		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
 
 		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
 			comp++;
@@ -2172,7 +2603,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 		while (j < num_cqes) {
 			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
 
-			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
 				break;
 
 			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
@@ -2184,6 +2615,8 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 			ulp_type = CNIC_ULP_RDMA;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
 			ulp_type = CNIC_ULP_ISCSI;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
+			ulp_type = CNIC_ULP_FCOE;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
 			ulp_type = CNIC_ULP_L4;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
@@ -2481,12 +2914,19 @@ static void cnic_service_bnx2x_bh(unsigned long data)
 	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
 
 	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
-	if (BNX2X_CHIP_IS_E2(cp->chip_id))
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
+			  MAX_KCQ_IDX);
+
 		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
 				status_idx, IGU_INT_ENABLE, 1);
-	else
+	} else {
 		cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
 				   status_idx, IGU_INT_ENABLE, 1);
+	}
 }
 
 static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -3223,6 +3663,18 @@ done:
 	csk_put(csk);
 }
 
+static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
+	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+	ctx->timestamp = jiffies;
+	ctx->wait_cond = 1;
+	wake_up(&ctx->waitq);
+}
+
 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -3231,6 +3683,10 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 	u32 l5_cid;
 	struct cnic_sock *csk;
 
+	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
+		cnic_process_fcoe_term_conn(dev, kcqe);
+		return;
+	}
 	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
 	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
 		cnic_cm_process_offld_pg(dev, l4kcqe);
@@ -3867,7 +4323,7 @@ static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
 
 	memset(&l2kwqe, 0, sizeof(l2kwqe));
 	wqes[0] = &l2kwqe;
-	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
 			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
 			       KWQE_OPCODE_SHIFT) | 2;
 	dev->submit_kwqes(dev, wqes, 1);
@@ -4310,6 +4766,10 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 			val16 ^= 0x1e1e;
 		dev->max_iscsi_conn = val16;
 	}
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id))
+		dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
+
 	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
 		int func = CNIC_FUNC(cp);
 		u32 mf_cfg_addr;
@@ -4336,6 +4796,9 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 		if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
 			dev->max_iscsi_conn = 0;
 
+		if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
+			dev->max_fcoe_conn = 0;
+
 		addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
 					 func_ext_config[func].
 					 iscsi_mac_addr_upper);
@@ -4357,18 +4820,52 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 		val = CNIC_RD(dev, addr);
 		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
 		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
-			addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-						 func_mf_config[func].config);
-			val = CNIC_RD(dev, addr);
-			val &= FUNC_MF_CFG_PROTOCOL_MASK;
-			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
-				dev->max_iscsi_conn = 0;
+			dev->max_fcoe_conn = 0;
+			dev->max_iscsi_conn = 0;
 		}
 	}
 	if (!is_valid_ether_addr(dev->mac_addr))
 		dev->max_iscsi_conn = 0;
 }
 
+static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 pfid = cp->pfid;
+
+	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
+			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
+	cp->kcq1.sw_prod_idx = 0;
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+		cp->kcq1.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	} else {
+		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+		cp->kcq1.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	}
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
+				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
+		cp->kcq2.sw_prod_idx = 0;
+		cp->kcq2.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
+		cp->kcq2.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	}
+}
+
 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -4399,28 +4896,19 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 	if (ret)
 		return -ENOMEM;
 
-	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
-
-	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
-			  CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
-	cp->kcq1.sw_prod_idx = 0;
-
 	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
-
-		cp->kcq1.hw_prod_idx_ptr =
-			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
-		cp->kcq1.status_idx_ptr =
-			&sb->sb.running_index[SM_RX_ID];
-	} else {
-		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
+		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
+				       BNX2X_FCOE_NUM_CONNECTIONS,
+				       cp->fcoe_start_cid);
 
-		cp->kcq1.hw_prod_idx_ptr =
-			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
-		cp->kcq1.status_idx_ptr =
-			&sb->sb.running_index[SM_RX_ID];
+		if (ret)
+			return -ENOMEM;
 	}
 
+	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
+
+	cnic_init_bnx2x_kcq(dev);
+
 	cnic_get_bnx2x_iscsi_info(dev);
 
 	/* Only 1 EQ */
@@ -4490,7 +4978,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
 		u32 cli = cp->ethdev->iscsi_l2_client_id;
 		u32 cid = cp->ethdev->iscsi_l2_cid;
-		u32 cl_qzone_id, type;
+		u32 cl_qzone_id;
 		struct client_init_ramrod_data *data;
 		union l5cm_specific_data l5_data;
 		struct ustorm_eth_rx_producers rx_prods = {0};
@@ -4522,15 +5010,10 @@ static void cnic_init_rings(struct cnic_dev *dev)
 		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
 		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
 
-		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
-			& SPE_HDR_CONN_TYPE;
-		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
-			 SPE_HDR_FUNCTION_ID);
-
 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
 
 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
-				    cid, type, &l5_data);
+				    cid, ETH_CONNECTION_TYPE, &l5_data);
 
 		i = 0;
 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
@@ -4560,7 +5043,6 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		u32 cid = cp->ethdev->iscsi_l2_cid;
 		union l5cm_specific_data l5_data;
 		int i;
-		u32 type;
 
 		cnic_ring_ctl(dev, cid, cli, 0);
 
@@ -4581,12 +5063,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
 
 		memset(&l5_data, 0, sizeof(l5_data));
-		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
-			& SPE_HDR_CONN_TYPE;
-		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
-			 SPE_HDR_FUNCTION_ID);
 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
-				    cid, type, &l5_data);
+				    cid, NONE_CONNECTION_TYPE, &l5_data);
 		msleep(10);
 	}
 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
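
A few illustrative sketches follow the diff. First, the cnic_alloc_uio_rings() change replaces two bare "return -ENOMEM" statements with a goto-based unwind ladder so that a half-built udev is not leaked when the second DMA allocation fails. A minimal stand-alone C sketch of the same pattern; the alloc_ring()/alloc_buf() helpers below are hypothetical stand-ins for the dma_alloc_coherent() calls, not driver API:

#include <stdlib.h>

struct udev_demo {
	void *ring;
	void *buf;
};

/* Hypothetical stand-ins for the two dma_alloc_coherent() calls. */
static void *alloc_ring(void) { return malloc(64); }
static void *alloc_buf(void)  { return malloc(64); }

static int alloc_uio_rings_demo(struct udev_demo **out)
{
	struct udev_demo *udev = calloc(1, sizeof(*udev));

	if (!udev)
		return -1;

	udev->ring = alloc_ring();
	if (!udev->ring)
		goto err_udev;	/* nothing else to undo yet */

	udev->buf = alloc_buf();
	if (!udev->buf)
		goto err_ring;	/* undo the ring allocation first */

	*out = udev;
	return 0;

 err_ring:
	free(udev->ring);
 err_udev:
	free(udev);
	return -1;
}

Each label undoes exactly the allocations that succeeded before the jump, which is the same ordering discipline the err_dma/err_udev labels enforce in the patch.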
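Second, the recurring SPE header cleanup: cnic_bnx2x_destroy_ramrod(), cnic_init_rings() and cnic_shutdown_rings() can now pass a bare connection type because cnic_submit_kwqe_16() builds the 16-bit SPE "type" word itself, folding in the PF id once for every caller. A minimal user-space sketch of that packing; the SPE_HDR_* shift/mask values and the connection-type number below are assumptions for illustration (bits 0-7 connection type, bits 8-15 PF id), not copied from this patch:

#include <stdint.h>
#include <stdio.h>

#define SPE_HDR_CONN_TYPE		0x00ffu	/* assumed mask */
#define SPE_HDR_CONN_TYPE_SHIFT		0	/* assumed shift */
#define SPE_HDR_FUNCTION_ID		0xff00u	/* assumed mask */
#define SPE_HDR_FUNCTION_ID_SHIFT	8	/* assumed shift */

/* Mirrors the type_16 computation added to cnic_submit_kwqe_16(). */
static uint16_t spe_hdr_type(uint8_t conn_type, uint8_t pfid)
{
	uint16_t type_16;

	type_16 = (conn_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (pfid << SPE_HDR_FUNCTION_ID_SHIFT) & SPE_HDR_FUNCTION_ID;
	return type_16;
}

int main(void)
{
	/* connection type 4 (FCoE in this illustration) on PF 1 */
	printf("type = 0x%04x\n", spe_hdr_type(4, 1));
	return 0;
}

Centralizing the packing removes three open-coded copies and guarantees the function id is set on every ramrod, which the multi-function E2 parts require.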
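Third, the new cnic_get_kcqe_layer_mask() is what lets cnic_bnx2x_fcoe_destroy() sleep on ctx->waitq: the FCoE TERMINATE_CONN completion is grouped and dispatched as an L4 event, so cnic_cm_process_kcqe() runs cnic_process_fcoe_term_conn() and wakes the waiter. A self-contained sketch of the batching loop in service_kcqes() with that special case; every constant below is made up for the demo and does not come from cnic_defs.h:

#include <stdint.h>
#include <stdio.h>

#define LAYER_MASK	0x70000000u	/* hypothetical layer field */
#define LAYER_L4	0x20000000u	/* hypothetical L4 code */
#define LAYER_L5_FCOE	0x40000000u	/* hypothetical FCoE code */
#define OPCODE(flag)	(((flag) >> 16) & 0xffu)	/* hypothetical */
#define OP_FCOE_TERM	0x81u		/* hypothetical terminate opcode */

/* Mirrors cnic_get_kcqe_layer_mask(): FCoE terminate groups as L4. */
static uint32_t kcqe_layer(uint32_t opflag)
{
	if (OPCODE(opflag) == OP_FCOE_TERM)
		return LAYER_L4;
	return opflag & LAYER_MASK;
}

int main(void)
{
	uint32_t ev[] = {
		LAYER_L5_FCOE | (0x10u << 16),		/* ordinary FCoE event */
		LAYER_L5_FCOE | (OP_FCOE_TERM << 16),	/* terminate -> L4 */
		LAYER_L4 | (0x01u << 16),		/* ordinary L4 event */
	};
	size_t n = sizeof(ev) / sizeof(ev[0]);
	size_t i = 0;

	while (i < n) {
		uint32_t layer = kcqe_layer(ev[i]);
		size_t j = i + 1;

		/* extend the batch while the effective layer stays the same */
		while (j < n && kcqe_layer(ev[j]) == layer)
			j++;
		printf("dispatch %zu event(s) as layer 0x%08x\n",
		       j - i, layer);
		i = j;
	}
	return 0;
}

With the made-up values above, the terminate completion batches with the following L4 event rather than with the FCoE events, which is exactly the routing the patch needs.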