/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
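
/*
 * Helper: a command needs T10-PI (protection information) handling only
 * when the connection negotiated PI support and the command's protection
 * op is not TARGET_PROT_NORMAL.
 */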
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}
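
/*
 * isert_comp_get() below picks the least-loaded completion context
 * (the one with the fewest active QPs) under device_list_mutex, so CQ
 * processing is spread across the available completion vectors.
 */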
97 static struct isert_comp
*
98 isert_comp_get(struct isert_conn
*isert_conn
)
100 struct isert_device
*device
= isert_conn
->device
;
101 struct isert_comp
*comp
;
104 mutex_lock(&device_list_mutex
);
105 for (i
= 0; i
< device
->comps_used
; i
++)
106 if (device
->comps
[i
].active_qps
<
107 device
->comps
[min
].active_qps
)
109 comp
= &device
->comps
[min
];
111 mutex_unlock(&device_list_mutex
);
113 isert_info("conn %p, using comp %p min_index: %d\n",
114 isert_conn
, comp
, min
);
120 isert_comp_put(struct isert_comp
*comp
)
122 mutex_lock(&device_list_mutex
);
124 mutex_unlock(&device_list_mutex
);
127 static struct ib_qp
*
128 isert_create_qp(struct isert_conn
*isert_conn
,
129 struct isert_comp
*comp
,
130 struct rdma_cm_id
*cma_id
)
132 struct isert_device
*device
= isert_conn
->device
;
133 struct ib_qp_init_attr attr
;
136 memset(&attr
, 0, sizeof(struct ib_qp_init_attr
));
137 attr
.event_handler
= isert_qp_event_callback
;
138 attr
.qp_context
= isert_conn
;
139 attr
.send_cq
= comp
->cq
;
140 attr
.recv_cq
= comp
->cq
;
141 attr
.cap
.max_send_wr
= ISERT_QP_MAX_REQ_DTOS
;
142 attr
.cap
.max_recv_wr
= ISERT_QP_MAX_RECV_DTOS
+ 1;
143 attr
.cap
.max_send_sge
= device
->ib_device
->attrs
.max_sge
;
144 isert_conn
->max_sge
= min(device
->ib_device
->attrs
.max_sge
,
145 device
->ib_device
->attrs
.max_sge_rd
);
146 attr
.cap
.max_recv_sge
= 1;
147 attr
.sq_sig_type
= IB_SIGNAL_REQ_WR
;
148 attr
.qp_type
= IB_QPT_RC
;
149 if (device
->pi_capable
)
150 attr
.create_flags
|= IB_QP_CREATE_SIGNATURE_EN
;
152 ret
= rdma_create_qp(cma_id
, device
->pd
, &attr
);
154 isert_err("rdma_create_qp failed for cma_id %d\n", ret
);
162 isert_conn_setup_qp(struct isert_conn
*isert_conn
, struct rdma_cm_id
*cma_id
)
164 struct isert_comp
*comp
;
167 comp
= isert_comp_get(isert_conn
);
168 isert_conn
->qp
= isert_create_qp(isert_conn
, comp
, cma_id
);
169 if (IS_ERR(isert_conn
->qp
)) {
170 ret
= PTR_ERR(isert_conn
->qp
);
176 isert_comp_put(comp
);
181 isert_cq_event_callback(struct ib_event
*e
, void *context
)
183 isert_dbg("event: %d\n", e
->event
);
187 isert_alloc_rx_descriptors(struct isert_conn
*isert_conn
)
189 struct isert_device
*device
= isert_conn
->device
;
190 struct ib_device
*ib_dev
= device
->ib_device
;
191 struct iser_rx_desc
*rx_desc
;
192 struct ib_sge
*rx_sg
;
196 isert_conn
->rx_descs
= kzalloc(ISERT_QP_MAX_RECV_DTOS
*
197 sizeof(struct iser_rx_desc
), GFP_KERNEL
);
198 if (!isert_conn
->rx_descs
)
201 rx_desc
= isert_conn
->rx_descs
;
203 for (i
= 0; i
< ISERT_QP_MAX_RECV_DTOS
; i
++, rx_desc
++) {
204 dma_addr
= ib_dma_map_single(ib_dev
, (void *)rx_desc
,
205 ISER_RX_PAYLOAD_SIZE
, DMA_FROM_DEVICE
);
206 if (ib_dma_mapping_error(ib_dev
, dma_addr
))
209 rx_desc
->dma_addr
= dma_addr
;
211 rx_sg
= &rx_desc
->rx_sg
;
212 rx_sg
->addr
= rx_desc
->dma_addr
;
213 rx_sg
->length
= ISER_RX_PAYLOAD_SIZE
;
214 rx_sg
->lkey
= device
->pd
->local_dma_lkey
;
220 rx_desc
= isert_conn
->rx_descs
;
221 for (j
= 0; j
< i
; j
++, rx_desc
++) {
222 ib_dma_unmap_single(ib_dev
, rx_desc
->dma_addr
,
223 ISER_RX_PAYLOAD_SIZE
, DMA_FROM_DEVICE
);
225 kfree(isert_conn
->rx_descs
);
226 isert_conn
->rx_descs
= NULL
;
228 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn
);
234 isert_free_rx_descriptors(struct isert_conn
*isert_conn
)
236 struct ib_device
*ib_dev
= isert_conn
->device
->ib_device
;
237 struct iser_rx_desc
*rx_desc
;
240 if (!isert_conn
->rx_descs
)
243 rx_desc
= isert_conn
->rx_descs
;
244 for (i
= 0; i
< ISERT_QP_MAX_RECV_DTOS
; i
++, rx_desc
++) {
245 ib_dma_unmap_single(ib_dev
, rx_desc
->dma_addr
,
246 ISER_RX_PAYLOAD_SIZE
, DMA_FROM_DEVICE
);
249 kfree(isert_conn
->rx_descs
);
250 isert_conn
->rx_descs
= NULL
;
253 static void isert_cq_work(struct work_struct
*);
254 static void isert_cq_callback(struct ib_cq
*, void *);
257 isert_free_comps(struct isert_device
*device
)
261 for (i
= 0; i
< device
->comps_used
; i
++) {
262 struct isert_comp
*comp
= &device
->comps
[i
];
265 cancel_work_sync(&comp
->work
);
266 ib_destroy_cq(comp
->cq
);
269 kfree(device
->comps
);
273 isert_alloc_comps(struct isert_device
*device
)
275 int i
, max_cqe
, ret
= 0;
277 device
->comps_used
= min(ISERT_MAX_CQ
, min_t(int, num_online_cpus(),
278 device
->ib_device
->num_comp_vectors
));
280 isert_info("Using %d CQs, %s supports %d vectors support "
281 "Fast registration %d pi_capable %d\n",
282 device
->comps_used
, device
->ib_device
->name
,
283 device
->ib_device
->num_comp_vectors
, device
->use_fastreg
,
286 device
->comps
= kcalloc(device
->comps_used
, sizeof(struct isert_comp
),
288 if (!device
->comps
) {
289 isert_err("Unable to allocate completion contexts\n");
293 max_cqe
= min(ISER_MAX_CQ_LEN
, device
->ib_device
->attrs
.max_cqe
);
295 for (i
= 0; i
< device
->comps_used
; i
++) {
296 struct ib_cq_init_attr cq_attr
= {};
297 struct isert_comp
*comp
= &device
->comps
[i
];
299 comp
->device
= device
;
300 INIT_WORK(&comp
->work
, isert_cq_work
);
301 cq_attr
.cqe
= max_cqe
;
302 cq_attr
.comp_vector
= i
;
303 comp
->cq
= ib_create_cq(device
->ib_device
,
305 isert_cq_event_callback
,
308 if (IS_ERR(comp
->cq
)) {
309 isert_err("Unable to allocate cq\n");
310 ret
= PTR_ERR(comp
->cq
);
315 ret
= ib_req_notify_cq(comp
->cq
, IB_CQ_NEXT_COMP
);
322 isert_free_comps(device
);
327 isert_create_device_ib_res(struct isert_device
*device
)
329 struct ib_device
*ib_dev
= device
->ib_device
;
332 isert_dbg("devattr->max_sge: %d\n", ib_dev
->attrs
.max_sge
);
333 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev
->attrs
.max_sge_rd
);
	/* assign function handlers */
336 if (ib_dev
->attrs
.device_cap_flags
& IB_DEVICE_MEM_MGT_EXTENSIONS
&&
337 ib_dev
->attrs
.device_cap_flags
& IB_DEVICE_SIGNATURE_HANDOVER
) {
338 device
->use_fastreg
= 1;
339 device
->reg_rdma_mem
= isert_reg_rdma
;
340 device
->unreg_rdma_mem
= isert_unreg_rdma
;
342 device
->use_fastreg
= 0;
343 device
->reg_rdma_mem
= isert_map_rdma
;
344 device
->unreg_rdma_mem
= isert_unmap_cmd
;
347 ret
= isert_alloc_comps(device
);
351 device
->pd
= ib_alloc_pd(ib_dev
);
352 if (IS_ERR(device
->pd
)) {
353 ret
= PTR_ERR(device
->pd
);
354 isert_err("failed to allocate pd, device %p, ret=%d\n",
359 /* Check signature cap */
360 device
->pi_capable
= ib_dev
->attrs
.device_cap_flags
&
361 IB_DEVICE_SIGNATURE_HANDOVER
? true : false;
366 isert_free_comps(device
);
371 isert_free_device_ib_res(struct isert_device
*device
)
373 isert_info("device %p\n", device
);
375 ib_dealloc_pd(device
->pd
);
376 isert_free_comps(device
);
380 isert_device_put(struct isert_device
*device
)
382 mutex_lock(&device_list_mutex
);
384 isert_info("device %p refcount %d\n", device
, device
->refcount
);
385 if (!device
->refcount
) {
386 isert_free_device_ib_res(device
);
387 list_del(&device
->dev_node
);
390 mutex_unlock(&device_list_mutex
);
393 static struct isert_device
*
394 isert_device_get(struct rdma_cm_id
*cma_id
)
396 struct isert_device
*device
;
399 mutex_lock(&device_list_mutex
);
400 list_for_each_entry(device
, &device_list
, dev_node
) {
401 if (device
->ib_device
->node_guid
== cma_id
->device
->node_guid
) {
403 isert_info("Found iser device %p refcount %d\n",
404 device
, device
->refcount
);
405 mutex_unlock(&device_list_mutex
);
410 device
= kzalloc(sizeof(struct isert_device
), GFP_KERNEL
);
412 mutex_unlock(&device_list_mutex
);
413 return ERR_PTR(-ENOMEM
);
416 INIT_LIST_HEAD(&device
->dev_node
);
418 device
->ib_device
= cma_id
->device
;
419 ret
= isert_create_device_ib_res(device
);
422 mutex_unlock(&device_list_mutex
);
427 list_add_tail(&device
->dev_node
, &device_list
);
428 isert_info("Created a new iser device %p refcount %d\n",
429 device
, device
->refcount
);
430 mutex_unlock(&device_list_mutex
);
436 isert_conn_free_fastreg_pool(struct isert_conn
*isert_conn
)
438 struct fast_reg_descriptor
*fr_desc
, *tmp
;
441 if (list_empty(&isert_conn
->fr_pool
))
444 isert_info("Freeing conn %p fastreg pool", isert_conn
);
446 list_for_each_entry_safe(fr_desc
, tmp
,
447 &isert_conn
->fr_pool
, list
) {
448 list_del(&fr_desc
->list
);
449 ib_dereg_mr(fr_desc
->data_mr
);
450 if (fr_desc
->pi_ctx
) {
451 ib_dereg_mr(fr_desc
->pi_ctx
->prot_mr
);
452 ib_dereg_mr(fr_desc
->pi_ctx
->sig_mr
);
453 kfree(fr_desc
->pi_ctx
);
459 if (i
< isert_conn
->fr_pool_size
)
460 isert_warn("Pool still has %d regions registered\n",
461 isert_conn
->fr_pool_size
- i
);
465 isert_create_pi_ctx(struct fast_reg_descriptor
*desc
,
466 struct ib_device
*device
,
469 struct pi_context
*pi_ctx
;
472 pi_ctx
= kzalloc(sizeof(*desc
->pi_ctx
), GFP_KERNEL
);
474 isert_err("Failed to allocate pi context\n");
478 pi_ctx
->prot_mr
= ib_alloc_mr(pd
, IB_MR_TYPE_MEM_REG
,
479 ISCSI_ISER_SG_TABLESIZE
);
480 if (IS_ERR(pi_ctx
->prot_mr
)) {
481 isert_err("Failed to allocate prot frmr err=%ld\n",
482 PTR_ERR(pi_ctx
->prot_mr
));
483 ret
= PTR_ERR(pi_ctx
->prot_mr
);
486 desc
->ind
|= ISERT_PROT_KEY_VALID
;
488 pi_ctx
->sig_mr
= ib_alloc_mr(pd
, IB_MR_TYPE_SIGNATURE
, 2);
489 if (IS_ERR(pi_ctx
->sig_mr
)) {
490 isert_err("Failed to allocate signature enabled mr err=%ld\n",
491 PTR_ERR(pi_ctx
->sig_mr
));
492 ret
= PTR_ERR(pi_ctx
->sig_mr
);
496 desc
->pi_ctx
= pi_ctx
;
497 desc
->ind
|= ISERT_SIG_KEY_VALID
;
498 desc
->ind
&= ~ISERT_PROTECTED
;
503 ib_dereg_mr(pi_ctx
->prot_mr
);
511 isert_create_fr_desc(struct ib_device
*ib_device
, struct ib_pd
*pd
,
512 struct fast_reg_descriptor
*fr_desc
)
514 fr_desc
->data_mr
= ib_alloc_mr(pd
, IB_MR_TYPE_MEM_REG
,
515 ISCSI_ISER_SG_TABLESIZE
);
516 if (IS_ERR(fr_desc
->data_mr
)) {
517 isert_err("Failed to allocate data frmr err=%ld\n",
518 PTR_ERR(fr_desc
->data_mr
));
519 return PTR_ERR(fr_desc
->data_mr
);
521 fr_desc
->ind
|= ISERT_DATA_KEY_VALID
;
523 isert_dbg("Created fr_desc %p\n", fr_desc
);
529 isert_conn_create_fastreg_pool(struct isert_conn
*isert_conn
)
531 struct fast_reg_descriptor
*fr_desc
;
532 struct isert_device
*device
= isert_conn
->device
;
533 struct se_session
*se_sess
= isert_conn
->conn
->sess
->se_sess
;
534 struct se_node_acl
*se_nacl
= se_sess
->se_node_acl
;
537 * Setup the number of FRMRs based upon the number of tags
538 * available to session in iscsi_target_locate_portal().
540 tag_num
= max_t(u32
, ISCSIT_MIN_TAGS
, se_nacl
->queue_depth
);
541 tag_num
= (tag_num
* 2) + ISCSIT_EXTRA_TAGS
;
543 isert_conn
->fr_pool_size
= 0;
544 for (i
= 0; i
< tag_num
; i
++) {
545 fr_desc
= kzalloc(sizeof(*fr_desc
), GFP_KERNEL
);
547 isert_err("Failed to allocate fast_reg descriptor\n");
552 ret
= isert_create_fr_desc(device
->ib_device
,
553 device
->pd
, fr_desc
);
555 isert_err("Failed to create fastreg descriptor err=%d\n",
561 list_add_tail(&fr_desc
->list
, &isert_conn
->fr_pool
);
562 isert_conn
->fr_pool_size
++;
565 isert_dbg("Creating conn %p fastreg pool size=%d",
566 isert_conn
, isert_conn
->fr_pool_size
);
571 isert_conn_free_fastreg_pool(isert_conn
);
576 isert_init_conn(struct isert_conn
*isert_conn
)
578 isert_conn
->state
= ISER_CONN_INIT
;
579 INIT_LIST_HEAD(&isert_conn
->node
);
580 init_completion(&isert_conn
->login_comp
);
581 init_completion(&isert_conn
->login_req_comp
);
582 init_completion(&isert_conn
->wait
);
583 kref_init(&isert_conn
->kref
);
584 mutex_init(&isert_conn
->mutex
);
585 spin_lock_init(&isert_conn
->pool_lock
);
586 INIT_LIST_HEAD(&isert_conn
->fr_pool
);
587 INIT_WORK(&isert_conn
->release_work
, isert_release_work
);
591 isert_free_login_buf(struct isert_conn
*isert_conn
)
593 struct ib_device
*ib_dev
= isert_conn
->device
->ib_device
;
595 ib_dma_unmap_single(ib_dev
, isert_conn
->login_rsp_dma
,
596 ISER_RX_LOGIN_SIZE
, DMA_TO_DEVICE
);
597 ib_dma_unmap_single(ib_dev
, isert_conn
->login_req_dma
,
598 ISCSI_DEF_MAX_RECV_SEG_LEN
,
600 kfree(isert_conn
->login_buf
);
604 isert_alloc_login_buf(struct isert_conn
*isert_conn
,
605 struct ib_device
*ib_dev
)
609 isert_conn
->login_buf
= kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN
+
610 ISER_RX_LOGIN_SIZE
, GFP_KERNEL
);
611 if (!isert_conn
->login_buf
) {
612 isert_err("Unable to allocate isert_conn->login_buf\n");
616 isert_conn
->login_req_buf
= isert_conn
->login_buf
;
617 isert_conn
->login_rsp_buf
= isert_conn
->login_buf
+
618 ISCSI_DEF_MAX_RECV_SEG_LEN
;
620 isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
621 isert_conn
->login_buf
, isert_conn
->login_req_buf
,
622 isert_conn
->login_rsp_buf
);
624 isert_conn
->login_req_dma
= ib_dma_map_single(ib_dev
,
625 (void *)isert_conn
->login_req_buf
,
626 ISCSI_DEF_MAX_RECV_SEG_LEN
, DMA_FROM_DEVICE
);
628 ret
= ib_dma_mapping_error(ib_dev
, isert_conn
->login_req_dma
);
630 isert_err("login_req_dma mapping error: %d\n", ret
);
631 isert_conn
->login_req_dma
= 0;
635 isert_conn
->login_rsp_dma
= ib_dma_map_single(ib_dev
,
636 (void *)isert_conn
->login_rsp_buf
,
637 ISER_RX_LOGIN_SIZE
, DMA_TO_DEVICE
);
639 ret
= ib_dma_mapping_error(ib_dev
, isert_conn
->login_rsp_dma
);
641 isert_err("login_rsp_dma mapping error: %d\n", ret
);
642 isert_conn
->login_rsp_dma
= 0;
643 goto out_req_dma_map
;
649 ib_dma_unmap_single(ib_dev
, isert_conn
->login_req_dma
,
650 ISCSI_DEF_MAX_RECV_SEG_LEN
, DMA_FROM_DEVICE
);
652 kfree(isert_conn
->login_buf
);
657 isert_set_nego_params(struct isert_conn
*isert_conn
,
658 struct rdma_conn_param
*param
)
660 struct ib_device_attr
*attr
= &isert_conn
->device
->ib_device
->attrs
;
662 /* Set max inflight RDMA READ requests */
663 isert_conn
->initiator_depth
= min_t(u8
, param
->initiator_depth
,
664 attr
->max_qp_init_rd_atom
);
665 isert_dbg("Using initiator_depth: %u\n", isert_conn
->initiator_depth
);
667 if (param
->private_data
) {
668 u8 flags
= *(u8
*)param
->private_data
;
		/*
		 * use remote invalidation if both the initiator
		 * and the HCA support it
		 */
674 isert_conn
->snd_w_inv
= !(flags
& ISER_SEND_W_INV_NOT_SUP
) &&
675 (attr
->device_cap_flags
&
676 IB_DEVICE_MEM_MGT_EXTENSIONS
);
677 if (isert_conn
->snd_w_inv
)
678 isert_info("Using remote invalidation\n");
683 isert_connect_request(struct rdma_cm_id
*cma_id
, struct rdma_cm_event
*event
)
685 struct isert_np
*isert_np
= cma_id
->context
;
686 struct iscsi_np
*np
= isert_np
->np
;
687 struct isert_conn
*isert_conn
;
688 struct isert_device
*device
;
691 spin_lock_bh(&np
->np_thread_lock
);
693 spin_unlock_bh(&np
->np_thread_lock
);
694 isert_dbg("iscsi_np is not enabled, reject connect request\n");
695 return rdma_reject(cma_id
, NULL
, 0);
697 spin_unlock_bh(&np
->np_thread_lock
);
699 isert_dbg("cma_id: %p, portal: %p\n",
700 cma_id
, cma_id
->context
);
702 isert_conn
= kzalloc(sizeof(struct isert_conn
), GFP_KERNEL
);
706 isert_init_conn(isert_conn
);
707 isert_conn
->cm_id
= cma_id
;
709 ret
= isert_alloc_login_buf(isert_conn
, cma_id
->device
);
713 device
= isert_device_get(cma_id
);
714 if (IS_ERR(device
)) {
715 ret
= PTR_ERR(device
);
716 goto out_rsp_dma_map
;
718 isert_conn
->device
= device
;
720 isert_set_nego_params(isert_conn
, &event
->param
.conn
);
722 ret
= isert_conn_setup_qp(isert_conn
, cma_id
);
726 ret
= isert_rdma_post_recvl(isert_conn
);
730 ret
= isert_rdma_accept(isert_conn
);
734 mutex_lock(&isert_np
->mutex
);
735 list_add_tail(&isert_conn
->node
, &isert_np
->accepted
);
736 mutex_unlock(&isert_np
->mutex
);
741 isert_device_put(device
);
743 isert_free_login_buf(isert_conn
);
746 rdma_reject(cma_id
, NULL
, 0);
751 isert_connect_release(struct isert_conn
*isert_conn
)
753 struct isert_device
*device
= isert_conn
->device
;
755 isert_dbg("conn %p\n", isert_conn
);
759 if (device
->use_fastreg
)
760 isert_conn_free_fastreg_pool(isert_conn
);
762 isert_free_rx_descriptors(isert_conn
);
763 if (isert_conn
->cm_id
)
764 rdma_destroy_id(isert_conn
->cm_id
);
766 if (isert_conn
->qp
) {
767 struct isert_comp
*comp
= isert_conn
->qp
->recv_cq
->cq_context
;
769 isert_comp_put(comp
);
770 ib_destroy_qp(isert_conn
->qp
);
773 if (isert_conn
->login_buf
)
774 isert_free_login_buf(isert_conn
);
776 isert_device_put(device
);
782 isert_connected_handler(struct rdma_cm_id
*cma_id
)
784 struct isert_conn
*isert_conn
= cma_id
->qp
->qp_context
;
785 struct isert_np
*isert_np
= cma_id
->context
;
787 isert_info("conn %p\n", isert_conn
);
789 mutex_lock(&isert_conn
->mutex
);
790 isert_conn
->state
= ISER_CONN_UP
;
791 kref_get(&isert_conn
->kref
);
792 mutex_unlock(&isert_conn
->mutex
);
794 mutex_lock(&isert_np
->mutex
);
795 list_move_tail(&isert_conn
->node
, &isert_np
->pending
);
796 mutex_unlock(&isert_np
->mutex
);
798 isert_info("np %p: Allow accept_np to continue\n", isert_np
);
803 isert_release_kref(struct kref
*kref
)
805 struct isert_conn
*isert_conn
= container_of(kref
,
806 struct isert_conn
, kref
);
808 isert_info("conn %p final kref %s/%d\n", isert_conn
, current
->comm
,
811 isert_connect_release(isert_conn
);
815 isert_put_conn(struct isert_conn
*isert_conn
)
817 kref_put(&isert_conn
->kref
, isert_release_kref
);
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
833 isert_conn_terminate(struct isert_conn
*isert_conn
)
837 switch (isert_conn
->state
) {
838 case ISER_CONN_TERMINATING
:
841 case ISER_CONN_FULL_FEATURE
: /* FALLTHRU */
842 isert_info("Terminating conn %p state %d\n",
843 isert_conn
, isert_conn
->state
);
844 isert_conn
->state
= ISER_CONN_TERMINATING
;
845 err
= rdma_disconnect(isert_conn
->cm_id
);
847 isert_warn("Failed rdma_disconnect isert_conn %p\n",
		isert_warn("conn %p terminating in state %d\n",
852 isert_conn
, isert_conn
->state
);
857 isert_np_cma_handler(struct isert_np
*isert_np
,
858 enum rdma_cm_event_type event
)
860 isert_dbg("%s (%d): isert np %p\n",
861 rdma_event_msg(event
), event
, isert_np
);
864 case RDMA_CM_EVENT_DEVICE_REMOVAL
:
865 isert_np
->cm_id
= NULL
;
867 case RDMA_CM_EVENT_ADDR_CHANGE
:
868 isert_np
->cm_id
= isert_setup_id(isert_np
);
869 if (IS_ERR(isert_np
->cm_id
)) {
870 isert_err("isert np %p setup id failed: %ld\n",
871 isert_np
, PTR_ERR(isert_np
->cm_id
));
872 isert_np
->cm_id
= NULL
;
876 isert_err("isert np %p Unexpected event %d\n",
884 isert_disconnected_handler(struct rdma_cm_id
*cma_id
,
885 enum rdma_cm_event_type event
)
887 struct isert_np
*isert_np
= cma_id
->context
;
888 struct isert_conn
*isert_conn
;
889 bool terminating
= false;
891 if (isert_np
->cm_id
== cma_id
)
892 return isert_np_cma_handler(cma_id
->context
, event
);
894 isert_conn
= cma_id
->qp
->qp_context
;
896 mutex_lock(&isert_conn
->mutex
);
897 terminating
= (isert_conn
->state
== ISER_CONN_TERMINATING
);
898 isert_conn_terminate(isert_conn
);
899 mutex_unlock(&isert_conn
->mutex
);
901 isert_info("conn %p completing wait\n", isert_conn
);
902 complete(&isert_conn
->wait
);
907 mutex_lock(&isert_np
->mutex
);
908 if (!list_empty(&isert_conn
->node
)) {
909 list_del_init(&isert_conn
->node
);
910 isert_put_conn(isert_conn
);
911 queue_work(isert_release_wq
, &isert_conn
->release_work
);
913 mutex_unlock(&isert_np
->mutex
);
920 isert_connect_error(struct rdma_cm_id
*cma_id
)
922 struct isert_conn
*isert_conn
= cma_id
->qp
->qp_context
;
924 list_del_init(&isert_conn
->node
);
925 isert_conn
->cm_id
= NULL
;
926 isert_put_conn(isert_conn
);
932 isert_cma_handler(struct rdma_cm_id
*cma_id
, struct rdma_cm_event
*event
)
936 isert_info("%s (%d): status %d id %p np %p\n",
937 rdma_event_msg(event
->event
), event
->event
,
938 event
->status
, cma_id
, cma_id
->context
);
940 switch (event
->event
) {
941 case RDMA_CM_EVENT_CONNECT_REQUEST
:
942 ret
= isert_connect_request(cma_id
, event
);
944 isert_err("failed handle connect request %d\n", ret
);
946 case RDMA_CM_EVENT_ESTABLISHED
:
947 isert_connected_handler(cma_id
);
949 case RDMA_CM_EVENT_ADDR_CHANGE
: /* FALLTHRU */
950 case RDMA_CM_EVENT_DISCONNECTED
: /* FALLTHRU */
951 case RDMA_CM_EVENT_DEVICE_REMOVAL
: /* FALLTHRU */
952 case RDMA_CM_EVENT_TIMEWAIT_EXIT
: /* FALLTHRU */
953 ret
= isert_disconnected_handler(cma_id
, event
->event
);
955 case RDMA_CM_EVENT_REJECTED
: /* FALLTHRU */
956 case RDMA_CM_EVENT_UNREACHABLE
: /* FALLTHRU */
957 case RDMA_CM_EVENT_CONNECT_ERROR
:
958 ret
= isert_connect_error(cma_id
);
961 isert_err("Unhandled RDMA CMA event: %d\n", event
->event
);
969 isert_post_recvm(struct isert_conn
*isert_conn
, u32 count
)
971 struct ib_recv_wr
*rx_wr
, *rx_wr_failed
;
973 struct iser_rx_desc
*rx_desc
;
975 for (rx_wr
= isert_conn
->rx_wr
, i
= 0; i
< count
; i
++, rx_wr
++) {
976 rx_desc
= &isert_conn
->rx_descs
[i
];
977 rx_wr
->wr_id
= (uintptr_t)rx_desc
;
978 rx_wr
->sg_list
= &rx_desc
->rx_sg
;
980 rx_wr
->next
= rx_wr
+ 1;
983 rx_wr
->next
= NULL
; /* mark end of work requests list */
985 isert_conn
->post_recv_buf_count
+= count
;
986 ret
= ib_post_recv(isert_conn
->qp
, isert_conn
->rx_wr
,
989 isert_err("ib_post_recv() failed with ret: %d\n", ret
);
990 isert_conn
->post_recv_buf_count
-= count
;
997 isert_post_recv(struct isert_conn
*isert_conn
, struct iser_rx_desc
*rx_desc
)
999 struct ib_recv_wr
*rx_wr_failed
, rx_wr
;
1002 rx_wr
.wr_id
= (uintptr_t)rx_desc
;
1003 rx_wr
.sg_list
= &rx_desc
->rx_sg
;
1007 isert_conn
->post_recv_buf_count
++;
1008 ret
= ib_post_recv(isert_conn
->qp
, &rx_wr
, &rx_wr_failed
);
1010 isert_err("ib_post_recv() failed with ret: %d\n", ret
);
1011 isert_conn
->post_recv_buf_count
--;
1018 isert_post_send(struct isert_conn
*isert_conn
, struct iser_tx_desc
*tx_desc
)
1020 struct ib_device
*ib_dev
= isert_conn
->cm_id
->device
;
1021 struct ib_send_wr send_wr
, *send_wr_failed
;
1024 ib_dma_sync_single_for_device(ib_dev
, tx_desc
->dma_addr
,
1025 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
1027 send_wr
.next
= NULL
;
1028 send_wr
.wr_id
= (uintptr_t)tx_desc
;
1029 send_wr
.sg_list
= tx_desc
->tx_sg
;
1030 send_wr
.num_sge
= tx_desc
->num_sge
;
1031 send_wr
.opcode
= IB_WR_SEND
;
1032 send_wr
.send_flags
= IB_SEND_SIGNALED
;
1034 ret
= ib_post_send(isert_conn
->qp
, &send_wr
, &send_wr_failed
);
1036 isert_err("ib_post_send() failed, ret: %d\n", ret
);
1042 isert_create_send_desc(struct isert_conn
*isert_conn
,
1043 struct isert_cmd
*isert_cmd
,
1044 struct iser_tx_desc
*tx_desc
)
1046 struct isert_device
*device
= isert_conn
->device
;
1047 struct ib_device
*ib_dev
= device
->ib_device
;
1049 ib_dma_sync_single_for_cpu(ib_dev
, tx_desc
->dma_addr
,
1050 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
1052 memset(&tx_desc
->iser_header
, 0, sizeof(struct iser_ctrl
));
1053 tx_desc
->iser_header
.flags
= ISCSI_CTRL
;
1055 tx_desc
->num_sge
= 1;
1056 tx_desc
->isert_cmd
= isert_cmd
;
1058 if (tx_desc
->tx_sg
[0].lkey
!= device
->pd
->local_dma_lkey
) {
1059 tx_desc
->tx_sg
[0].lkey
= device
->pd
->local_dma_lkey
;
1060 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc
);
1065 isert_init_tx_hdrs(struct isert_conn
*isert_conn
,
1066 struct iser_tx_desc
*tx_desc
)
1068 struct isert_device
*device
= isert_conn
->device
;
1069 struct ib_device
*ib_dev
= device
->ib_device
;
1072 dma_addr
= ib_dma_map_single(ib_dev
, (void *)tx_desc
,
1073 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
1074 if (ib_dma_mapping_error(ib_dev
, dma_addr
)) {
1075 isert_err("ib_dma_mapping_error() failed\n");
1079 tx_desc
->dma_addr
= dma_addr
;
1080 tx_desc
->tx_sg
[0].addr
= tx_desc
->dma_addr
;
1081 tx_desc
->tx_sg
[0].length
= ISER_HEADERS_LEN
;
1082 tx_desc
->tx_sg
[0].lkey
= device
->pd
->local_dma_lkey
;
1084 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
1085 tx_desc
->tx_sg
[0].addr
, tx_desc
->tx_sg
[0].length
,
1086 tx_desc
->tx_sg
[0].lkey
);
1092 isert_init_send_wr(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
,
1093 struct ib_send_wr
*send_wr
)
1095 struct iser_tx_desc
*tx_desc
= &isert_cmd
->tx_desc
;
1097 isert_cmd
->rdma_wr
.iser_ib_op
= ISER_IB_SEND
;
1098 send_wr
->wr_id
= (uintptr_t)&isert_cmd
->tx_desc
;
1100 if (isert_conn
->snd_w_inv
&& isert_cmd
->inv_rkey
) {
1101 send_wr
->opcode
= IB_WR_SEND_WITH_INV
;
1102 send_wr
->ex
.invalidate_rkey
= isert_cmd
->inv_rkey
;
1104 send_wr
->opcode
= IB_WR_SEND
;
1107 send_wr
->sg_list
= &tx_desc
->tx_sg
[0];
1108 send_wr
->num_sge
= isert_cmd
->tx_desc
.num_sge
;
1109 send_wr
->send_flags
= IB_SEND_SIGNALED
;
1113 isert_rdma_post_recvl(struct isert_conn
*isert_conn
)
1115 struct ib_recv_wr rx_wr
, *rx_wr_fail
;
1119 memset(&sge
, 0, sizeof(struct ib_sge
));
1120 sge
.addr
= isert_conn
->login_req_dma
;
1121 sge
.length
= ISER_RX_LOGIN_SIZE
;
1122 sge
.lkey
= isert_conn
->device
->pd
->local_dma_lkey
;
1124 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
1125 sge
.addr
, sge
.length
, sge
.lkey
);
1127 memset(&rx_wr
, 0, sizeof(struct ib_recv_wr
));
1128 rx_wr
.wr_id
= (uintptr_t)isert_conn
->login_req_buf
;
1129 rx_wr
.sg_list
= &sge
;
1132 isert_conn
->post_recv_buf_count
++;
1133 ret
= ib_post_recv(isert_conn
->qp
, &rx_wr
, &rx_wr_fail
);
1135 isert_err("ib_post_recv() failed: %d\n", ret
);
1136 isert_conn
->post_recv_buf_count
--;
1143 isert_put_login_tx(struct iscsi_conn
*conn
, struct iscsi_login
*login
,
1146 struct isert_conn
*isert_conn
= conn
->context
;
1147 struct isert_device
*device
= isert_conn
->device
;
1148 struct ib_device
*ib_dev
= device
->ib_device
;
1149 struct iser_tx_desc
*tx_desc
= &isert_conn
->login_tx_desc
;
1152 isert_create_send_desc(isert_conn
, NULL
, tx_desc
);
1154 memcpy(&tx_desc
->iscsi_header
, &login
->rsp
[0],
1155 sizeof(struct iscsi_hdr
));
1157 isert_init_tx_hdrs(isert_conn
, tx_desc
);
1160 struct ib_sge
*tx_dsg
= &tx_desc
->tx_sg
[1];
1162 ib_dma_sync_single_for_cpu(ib_dev
, isert_conn
->login_rsp_dma
,
1163 length
, DMA_TO_DEVICE
);
1165 memcpy(isert_conn
->login_rsp_buf
, login
->rsp_buf
, length
);
1167 ib_dma_sync_single_for_device(ib_dev
, isert_conn
->login_rsp_dma
,
1168 length
, DMA_TO_DEVICE
);
1170 tx_dsg
->addr
= isert_conn
->login_rsp_dma
;
1171 tx_dsg
->length
= length
;
1172 tx_dsg
->lkey
= isert_conn
->device
->pd
->local_dma_lkey
;
1173 tx_desc
->num_sge
= 2;
1175 if (!login
->login_failed
) {
1176 if (login
->login_complete
) {
1177 if (!conn
->sess
->sess_ops
->SessionType
&&
1178 isert_conn
->device
->use_fastreg
) {
1179 ret
= isert_conn_create_fastreg_pool(isert_conn
);
1181 isert_err("Conn: %p failed to create"
1182 " fastreg pool\n", isert_conn
);
1187 ret
= isert_alloc_rx_descriptors(isert_conn
);
1191 ret
= isert_post_recvm(isert_conn
,
1192 ISERT_QP_MAX_RECV_DTOS
);
1196 /* Now we are in FULL_FEATURE phase */
1197 mutex_lock(&isert_conn
->mutex
);
1198 isert_conn
->state
= ISER_CONN_FULL_FEATURE
;
1199 mutex_unlock(&isert_conn
->mutex
);
1203 ret
= isert_rdma_post_recvl(isert_conn
);
1208 ret
= isert_post_send(isert_conn
, tx_desc
);
1216 isert_rx_login_req(struct isert_conn
*isert_conn
)
1218 struct iser_rx_desc
*rx_desc
= (void *)isert_conn
->login_req_buf
;
1219 int rx_buflen
= isert_conn
->login_req_len
;
1220 struct iscsi_conn
*conn
= isert_conn
->conn
;
1221 struct iscsi_login
*login
= conn
->conn_login
;
1224 isert_info("conn %p\n", isert_conn
);
1226 WARN_ON_ONCE(!login
);
1228 if (login
->first_request
) {
1229 struct iscsi_login_req
*login_req
=
1230 (struct iscsi_login_req
*)&rx_desc
->iscsi_header
;
1232 * Setup the initial iscsi_login values from the leading
1233 * login request PDU.
1235 login
->leading_connection
= (!login_req
->tsih
) ? 1 : 0;
1236 login
->current_stage
=
1237 (login_req
->flags
& ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK
)
1239 login
->version_min
= login_req
->min_version
;
1240 login
->version_max
= login_req
->max_version
;
1241 memcpy(login
->isid
, login_req
->isid
, 6);
1242 login
->cmd_sn
= be32_to_cpu(login_req
->cmdsn
);
1243 login
->init_task_tag
= login_req
->itt
;
1244 login
->initial_exp_statsn
= be32_to_cpu(login_req
->exp_statsn
);
1245 login
->cid
= be16_to_cpu(login_req
->cid
);
1246 login
->tsih
= be16_to_cpu(login_req
->tsih
);
1249 memcpy(&login
->req
[0], (void *)&rx_desc
->iscsi_header
, ISCSI_HDR_LEN
);
1251 size
= min(rx_buflen
, MAX_KEY_VALUE_PAIRS
);
1252 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1253 "MAX_KEY_VALUE_PAIRS: %d\n", size
, rx_buflen
,
1254 MAX_KEY_VALUE_PAIRS
);
1255 memcpy(login
->req_buf
, &rx_desc
->data
[0], size
);
1257 if (login
->first_request
) {
1258 complete(&isert_conn
->login_comp
);
1261 schedule_delayed_work(&conn
->login_work
, 0);
1264 static struct iscsi_cmd
1265 *isert_allocate_cmd(struct iscsi_conn
*conn
, struct iser_rx_desc
*rx_desc
)
1267 struct isert_conn
*isert_conn
= conn
->context
;
1268 struct isert_cmd
*isert_cmd
;
1269 struct iscsi_cmd
*cmd
;
1271 cmd
= iscsit_allocate_cmd(conn
, TASK_INTERRUPTIBLE
);
1273 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1276 isert_cmd
= iscsit_priv_cmd(cmd
);
1277 isert_cmd
->conn
= isert_conn
;
1278 isert_cmd
->iscsi_cmd
= cmd
;
1279 isert_cmd
->rx_desc
= rx_desc
;
1285 isert_handle_scsi_cmd(struct isert_conn
*isert_conn
,
1286 struct isert_cmd
*isert_cmd
, struct iscsi_cmd
*cmd
,
1287 struct iser_rx_desc
*rx_desc
, unsigned char *buf
)
1289 struct iscsi_conn
*conn
= isert_conn
->conn
;
1290 struct iscsi_scsi_req
*hdr
= (struct iscsi_scsi_req
*)buf
;
1291 int imm_data
, imm_data_len
, unsol_data
, sg_nents
, rc
;
1292 bool dump_payload
= false;
1293 unsigned int data_len
;
1295 rc
= iscsit_setup_scsi_cmd(conn
, cmd
, buf
);
1299 imm_data
= cmd
->immediate_data
;
1300 imm_data_len
= cmd
->first_burst_len
;
1301 unsol_data
= cmd
->unsolicited_data
;
1302 data_len
= cmd
->se_cmd
.data_length
;
1304 if (imm_data
&& imm_data_len
== data_len
)
1305 cmd
->se_cmd
.se_cmd_flags
|= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC
;
1306 rc
= iscsit_process_scsi_cmd(conn
, cmd
, hdr
);
1309 } else if (rc
> 0) {
1310 dump_payload
= true;
1317 if (imm_data_len
!= data_len
) {
1318 sg_nents
= max(1UL, DIV_ROUND_UP(imm_data_len
, PAGE_SIZE
));
1319 sg_copy_from_buffer(cmd
->se_cmd
.t_data_sg
, sg_nents
,
1320 &rx_desc
->data
[0], imm_data_len
);
1321 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
1322 sg_nents
, imm_data_len
);
1324 sg_init_table(&isert_cmd
->sg
, 1);
1325 cmd
->se_cmd
.t_data_sg
= &isert_cmd
->sg
;
1326 cmd
->se_cmd
.t_data_nents
= 1;
1327 sg_set_buf(&isert_cmd
->sg
, &rx_desc
->data
[0], imm_data_len
);
1328 isert_dbg("Transfer Immediate imm_data_len: %d\n",
1332 cmd
->write_data_done
+= imm_data_len
;
1334 if (cmd
->write_data_done
== cmd
->se_cmd
.data_length
) {
1335 spin_lock_bh(&cmd
->istate_lock
);
1336 cmd
->cmd_flags
|= ICF_GOT_LAST_DATAOUT
;
1337 cmd
->i_state
= ISTATE_RECEIVED_LAST_DATAOUT
;
1338 spin_unlock_bh(&cmd
->istate_lock
);
1342 rc
= iscsit_sequence_cmd(conn
, cmd
, buf
, hdr
->cmdsn
);
1344 if (!rc
&& dump_payload
== false && unsol_data
)
1345 iscsit_set_unsoliticed_dataout(cmd
);
1346 else if (dump_payload
&& imm_data
)
1347 target_put_sess_cmd(&cmd
->se_cmd
);
1353 isert_handle_iscsi_dataout(struct isert_conn
*isert_conn
,
1354 struct iser_rx_desc
*rx_desc
, unsigned char *buf
)
1356 struct scatterlist
*sg_start
;
1357 struct iscsi_conn
*conn
= isert_conn
->conn
;
1358 struct iscsi_cmd
*cmd
= NULL
;
1359 struct iscsi_data
*hdr
= (struct iscsi_data
*)buf
;
1360 u32 unsol_data_len
= ntoh24(hdr
->dlength
);
1361 int rc
, sg_nents
, sg_off
, page_off
;
1363 rc
= iscsit_check_dataout_hdr(conn
, buf
, &cmd
);
1369 * FIXME: Unexpected unsolicited_data out
1371 if (!cmd
->unsolicited_data
) {
1372 isert_err("Received unexpected solicited data payload\n");
1377 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1378 "write_data_done: %u, data_length: %u\n",
1379 unsol_data_len
, cmd
->write_data_done
,
1380 cmd
->se_cmd
.data_length
);
1382 sg_off
= cmd
->write_data_done
/ PAGE_SIZE
;
1383 sg_start
= &cmd
->se_cmd
.t_data_sg
[sg_off
];
1384 sg_nents
= max(1UL, DIV_ROUND_UP(unsol_data_len
, PAGE_SIZE
));
1385 page_off
= cmd
->write_data_done
% PAGE_SIZE
;
1387 * FIXME: Non page-aligned unsolicited_data out
1390 isert_err("unexpected non-page aligned data payload\n");
1394 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1395 "sg_nents: %u from %p %u\n", sg_start
, sg_off
,
1396 sg_nents
, &rx_desc
->data
[0], unsol_data_len
);
1398 sg_copy_from_buffer(sg_start
, sg_nents
, &rx_desc
->data
[0],
1401 rc
= iscsit_check_dataout_payload(cmd
, hdr
, false);
	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer beforehand
	 */
1409 rc
= isert_post_recv(isert_conn
, rx_desc
);
1411 isert_err("ib_post_recv failed with %d\n", rc
);
1418 isert_handle_nop_out(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
,
1419 struct iscsi_cmd
*cmd
, struct iser_rx_desc
*rx_desc
,
1422 struct iscsi_conn
*conn
= isert_conn
->conn
;
1423 struct iscsi_nopout
*hdr
= (struct iscsi_nopout
*)buf
;
1426 rc
= iscsit_setup_nop_out(conn
, cmd
, hdr
);
1430 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1433 return iscsit_process_nop_out(conn
, cmd
, hdr
);
1437 isert_handle_text_cmd(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
,
1438 struct iscsi_cmd
*cmd
, struct iser_rx_desc
*rx_desc
,
1439 struct iscsi_text
*hdr
)
1441 struct iscsi_conn
*conn
= isert_conn
->conn
;
1442 u32 payload_length
= ntoh24(hdr
->dlength
);
1444 unsigned char *text_in
= NULL
;
1446 rc
= iscsit_setup_text_cmd(conn
, cmd
, hdr
);
1450 if (payload_length
) {
1451 text_in
= kzalloc(payload_length
, GFP_KERNEL
);
1453 isert_err("Unable to allocate text_in of payload_length: %u\n",
1458 cmd
->text_in_ptr
= text_in
;
1460 memcpy(cmd
->text_in_ptr
, &rx_desc
->data
[0], payload_length
);
1462 return iscsit_process_text_cmd(conn
, cmd
, hdr
);
1466 isert_rx_opcode(struct isert_conn
*isert_conn
, struct iser_rx_desc
*rx_desc
,
1467 uint32_t read_stag
, uint64_t read_va
,
1468 uint32_t write_stag
, uint64_t write_va
)
1470 struct iscsi_hdr
*hdr
= &rx_desc
->iscsi_header
;
1471 struct iscsi_conn
*conn
= isert_conn
->conn
;
1472 struct iscsi_cmd
*cmd
;
1473 struct isert_cmd
*isert_cmd
;
1475 u8 opcode
= (hdr
->opcode
& ISCSI_OPCODE_MASK
);
1477 if (conn
->sess
->sess_ops
->SessionType
&&
1478 (!(opcode
& ISCSI_OP_TEXT
) || !(opcode
& ISCSI_OP_LOGOUT
))) {
1479 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1480 " ignoring\n", opcode
);
1485 case ISCSI_OP_SCSI_CMD
:
1486 cmd
= isert_allocate_cmd(conn
, rx_desc
);
1490 isert_cmd
= iscsit_priv_cmd(cmd
);
1491 isert_cmd
->read_stag
= read_stag
;
1492 isert_cmd
->read_va
= read_va
;
1493 isert_cmd
->write_stag
= write_stag
;
1494 isert_cmd
->write_va
= write_va
;
1495 isert_cmd
->inv_rkey
= read_stag
? read_stag
: write_stag
;
1497 ret
= isert_handle_scsi_cmd(isert_conn
, isert_cmd
, cmd
,
1498 rx_desc
, (unsigned char *)hdr
);
1500 case ISCSI_OP_NOOP_OUT
:
1501 cmd
= isert_allocate_cmd(conn
, rx_desc
);
1505 isert_cmd
= iscsit_priv_cmd(cmd
);
1506 ret
= isert_handle_nop_out(isert_conn
, isert_cmd
, cmd
,
1507 rx_desc
, (unsigned char *)hdr
);
1509 case ISCSI_OP_SCSI_DATA_OUT
:
1510 ret
= isert_handle_iscsi_dataout(isert_conn
, rx_desc
,
1511 (unsigned char *)hdr
);
1513 case ISCSI_OP_SCSI_TMFUNC
:
1514 cmd
= isert_allocate_cmd(conn
, rx_desc
);
1518 ret
= iscsit_handle_task_mgt_cmd(conn
, cmd
,
1519 (unsigned char *)hdr
);
1521 case ISCSI_OP_LOGOUT
:
1522 cmd
= isert_allocate_cmd(conn
, rx_desc
);
1526 ret
= iscsit_handle_logout_cmd(conn
, cmd
, (unsigned char *)hdr
);
1529 if (be32_to_cpu(hdr
->ttt
) != 0xFFFFFFFF)
1530 cmd
= iscsit_find_cmd_from_itt(conn
, hdr
->itt
);
1532 cmd
= isert_allocate_cmd(conn
, rx_desc
);
1537 isert_cmd
= iscsit_priv_cmd(cmd
);
1538 ret
= isert_handle_text_cmd(isert_conn
, isert_cmd
, cmd
,
1539 rx_desc
, (struct iscsi_text
*)hdr
);
1542 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode
);
1551 isert_rx_do_work(struct iser_rx_desc
*rx_desc
, struct isert_conn
*isert_conn
)
1553 struct iser_ctrl
*iser_ctrl
= &rx_desc
->iser_header
;
1554 uint64_t read_va
= 0, write_va
= 0;
1555 uint32_t read_stag
= 0, write_stag
= 0;
1557 switch (iser_ctrl
->flags
& 0xF0) {
1559 if (iser_ctrl
->flags
& ISER_RSV
) {
1560 read_stag
= be32_to_cpu(iser_ctrl
->read_stag
);
1561 read_va
= be64_to_cpu(iser_ctrl
->read_va
);
1562 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1563 read_stag
, (unsigned long long)read_va
);
1565 if (iser_ctrl
->flags
& ISER_WSV
) {
1566 write_stag
= be32_to_cpu(iser_ctrl
->write_stag
);
1567 write_va
= be64_to_cpu(iser_ctrl
->write_va
);
1568 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1569 write_stag
, (unsigned long long)write_va
);
1572 isert_dbg("ISER ISCSI_CTRL PDU\n");
1575 isert_err("iSER Hello message\n");
1578 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl
->flags
);
1582 isert_rx_opcode(isert_conn
, rx_desc
,
1583 read_stag
, read_va
, write_stag
, write_va
);
1587 isert_rcv_completion(struct iser_rx_desc
*desc
,
1588 struct isert_conn
*isert_conn
,
1591 struct ib_device
*ib_dev
= isert_conn
->cm_id
->device
;
1592 struct iscsi_hdr
*hdr
;
1596 if ((char *)desc
== isert_conn
->login_req_buf
) {
1597 rx_dma
= isert_conn
->login_req_dma
;
1598 rx_buflen
= ISER_RX_LOGIN_SIZE
;
1599 isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1602 rx_dma
= desc
->dma_addr
;
1603 rx_buflen
= ISER_RX_PAYLOAD_SIZE
;
1604 isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1608 ib_dma_sync_single_for_cpu(ib_dev
, rx_dma
, rx_buflen
, DMA_FROM_DEVICE
);
1610 hdr
= &desc
->iscsi_header
;
1611 isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1612 hdr
->opcode
, hdr
->itt
, hdr
->flags
,
1613 (int)(xfer_len
- ISER_HEADERS_LEN
));
1615 if ((char *)desc
== isert_conn
->login_req_buf
) {
1616 isert_conn
->login_req_len
= xfer_len
- ISER_HEADERS_LEN
;
1617 if (isert_conn
->conn
) {
1618 struct iscsi_login
*login
= isert_conn
->conn
->conn_login
;
1620 if (login
&& !login
->first_request
)
1621 isert_rx_login_req(isert_conn
);
1623 mutex_lock(&isert_conn
->mutex
);
1624 complete(&isert_conn
->login_req_comp
);
1625 mutex_unlock(&isert_conn
->mutex
);
1627 isert_rx_do_work(desc
, isert_conn
);
1630 ib_dma_sync_single_for_device(ib_dev
, rx_dma
, rx_buflen
,
1633 isert_conn
->post_recv_buf_count
--;
1637 isert_map_data_buf(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
,
1638 struct scatterlist
*sg
, u32 nents
, u32 length
, u32 offset
,
1639 enum iser_ib_op_code op
, struct isert_data_buf
*data
)
1641 struct ib_device
*ib_dev
= isert_conn
->cm_id
->device
;
1643 data
->dma_dir
= op
== ISER_IB_RDMA_WRITE
?
1644 DMA_TO_DEVICE
: DMA_FROM_DEVICE
;
1646 data
->len
= length
- offset
;
1647 data
->offset
= offset
;
1648 data
->sg_off
= data
->offset
/ PAGE_SIZE
;
1650 data
->sg
= &sg
[data
->sg_off
];
1651 data
->nents
= min_t(unsigned int, nents
- data
->sg_off
,
1652 ISCSI_ISER_SG_TABLESIZE
);
1653 data
->len
= min_t(unsigned int, data
->len
, ISCSI_ISER_SG_TABLESIZE
*
1656 data
->dma_nents
= ib_dma_map_sg(ib_dev
, data
->sg
, data
->nents
,
1658 if (unlikely(!data
->dma_nents
)) {
1659 isert_err("Cmd: unable to dma map SGs %p\n", sg
);
1663 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1664 isert_cmd
, data
->dma_nents
, data
->sg
, data
->nents
, data
->len
);
1670 isert_unmap_data_buf(struct isert_conn
*isert_conn
, struct isert_data_buf
*data
)
1672 struct ib_device
*ib_dev
= isert_conn
->cm_id
->device
;
1674 ib_dma_unmap_sg(ib_dev
, data
->sg
, data
->nents
, data
->dma_dir
);
1675 memset(data
, 0, sizeof(*data
));
1681 isert_unmap_cmd(struct isert_cmd
*isert_cmd
, struct isert_conn
*isert_conn
)
1683 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1685 isert_dbg("Cmd %p\n", isert_cmd
);
1688 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd
);
1689 isert_unmap_data_buf(isert_conn
, &wr
->data
);
1693 isert_dbg("Cmd %p free send_wr\n", isert_cmd
);
1699 isert_dbg("Cmd %p free ib_sge\n", isert_cmd
);
1706 isert_unreg_rdma(struct isert_cmd
*isert_cmd
, struct isert_conn
*isert_conn
)
1708 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1710 isert_dbg("Cmd %p\n", isert_cmd
);
1713 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd
, wr
->fr_desc
);
1714 if (wr
->fr_desc
->ind
& ISERT_PROTECTED
) {
1715 isert_unmap_data_buf(isert_conn
, &wr
->prot
);
1716 wr
->fr_desc
->ind
&= ~ISERT_PROTECTED
;
1718 spin_lock_bh(&isert_conn
->pool_lock
);
1719 list_add_tail(&wr
->fr_desc
->list
, &isert_conn
->fr_pool
);
1720 spin_unlock_bh(&isert_conn
->pool_lock
);
1725 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd
);
1726 isert_unmap_data_buf(isert_conn
, &wr
->data
);
1734 isert_put_cmd(struct isert_cmd
*isert_cmd
, bool comp_err
)
1736 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1737 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1738 struct iscsi_conn
*conn
= isert_conn
->conn
;
1739 struct isert_device
*device
= isert_conn
->device
;
1740 struct iscsi_text_rsp
*hdr
;
1742 isert_dbg("Cmd %p\n", isert_cmd
);
1744 switch (cmd
->iscsi_opcode
) {
1745 case ISCSI_OP_SCSI_CMD
:
1746 spin_lock_bh(&conn
->cmd_lock
);
1747 if (!list_empty(&cmd
->i_conn_node
))
1748 list_del_init(&cmd
->i_conn_node
);
1749 spin_unlock_bh(&conn
->cmd_lock
);
1751 if (cmd
->data_direction
== DMA_TO_DEVICE
) {
1752 iscsit_stop_dataout_timer(cmd
);
1754 * Check for special case during comp_err where
1755 * WRITE_PENDING has been handed off from core,
1756 * but requires an extra target_put_sess_cmd()
1757 * before transport_generic_free_cmd() below.
1760 cmd
->se_cmd
.t_state
== TRANSPORT_WRITE_PENDING
) {
1761 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1763 target_put_sess_cmd(se_cmd
);
1767 device
->unreg_rdma_mem(isert_cmd
, isert_conn
);
1768 transport_generic_free_cmd(&cmd
->se_cmd
, 0);
1770 case ISCSI_OP_SCSI_TMFUNC
:
1771 spin_lock_bh(&conn
->cmd_lock
);
1772 if (!list_empty(&cmd
->i_conn_node
))
1773 list_del_init(&cmd
->i_conn_node
);
1774 spin_unlock_bh(&conn
->cmd_lock
);
1776 transport_generic_free_cmd(&cmd
->se_cmd
, 0);
1778 case ISCSI_OP_REJECT
:
1779 case ISCSI_OP_NOOP_OUT
:
1781 hdr
= (struct iscsi_text_rsp
*)&isert_cmd
->tx_desc
.iscsi_header
;
1782 /* If the continue bit is on, keep the command alive */
1783 if (hdr
->flags
& ISCSI_FLAG_TEXT_CONTINUE
)
1786 spin_lock_bh(&conn
->cmd_lock
);
1787 if (!list_empty(&cmd
->i_conn_node
))
1788 list_del_init(&cmd
->i_conn_node
);
1789 spin_unlock_bh(&conn
->cmd_lock
);
1792 * Handle special case for REJECT when iscsi_add_reject*() has
1793 * overwritten the original iscsi_opcode assignment, and the
1794 * associated cmd->se_cmd needs to be released.
1796 if (cmd
->se_cmd
.se_tfo
!= NULL
) {
1797 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1799 transport_generic_free_cmd(&cmd
->se_cmd
, 0);
1806 iscsit_release_cmd(cmd
);
1812 isert_unmap_tx_desc(struct iser_tx_desc
*tx_desc
, struct ib_device
*ib_dev
)
1814 if (tx_desc
->dma_addr
!= 0) {
1815 isert_dbg("unmap single for tx_desc->dma_addr\n");
1816 ib_dma_unmap_single(ib_dev
, tx_desc
->dma_addr
,
1817 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
1818 tx_desc
->dma_addr
= 0;
1823 isert_completion_put(struct iser_tx_desc
*tx_desc
, struct isert_cmd
*isert_cmd
,
1824 struct ib_device
*ib_dev
, bool comp_err
)
1826 if (isert_cmd
->pdu_buf_dma
!= 0) {
1827 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1828 ib_dma_unmap_single(ib_dev
, isert_cmd
->pdu_buf_dma
,
1829 isert_cmd
->pdu_buf_len
, DMA_TO_DEVICE
);
1830 isert_cmd
->pdu_buf_dma
= 0;
1833 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1834 isert_put_cmd(isert_cmd
, comp_err
);
1838 isert_check_pi_status(struct se_cmd
*se_cmd
, struct ib_mr
*sig_mr
)
1840 struct ib_mr_status mr_status
;
1843 ret
= ib_check_mr_status(sig_mr
, IB_MR_CHECK_SIG_STATUS
, &mr_status
);
1845 isert_err("ib_check_mr_status failed, ret %d\n", ret
);
1846 goto fail_mr_status
;
1849 if (mr_status
.fail_status
& IB_MR_CHECK_SIG_STATUS
) {
1851 u32 block_size
= se_cmd
->se_dev
->dev_attrib
.block_size
+ 8;
1853 switch (mr_status
.sig_err
.err_type
) {
1854 case IB_SIG_BAD_GUARD
:
1855 se_cmd
->pi_err
= TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED
;
1857 case IB_SIG_BAD_REFTAG
:
1858 se_cmd
->pi_err
= TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED
;
1860 case IB_SIG_BAD_APPTAG
:
1861 se_cmd
->pi_err
= TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED
;
1864 sec_offset_err
= mr_status
.sig_err
.sig_err_offset
;
1865 do_div(sec_offset_err
, block_size
);
1866 se_cmd
->bad_sector
= sec_offset_err
+ se_cmd
->t_task_lba
;
1868 isert_err("PI error found type %d at sector 0x%llx "
1869 "expected 0x%x vs actual 0x%x\n",
1870 mr_status
.sig_err
.err_type
,
1871 (unsigned long long)se_cmd
->bad_sector
,
1872 mr_status
.sig_err
.expected
,
1873 mr_status
.sig_err
.actual
);
1882 isert_completion_rdma_write(struct iser_tx_desc
*tx_desc
,
1883 struct isert_cmd
*isert_cmd
)
1885 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1886 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1887 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1888 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1889 struct isert_device
*device
= isert_conn
->device
;
1892 if (wr
->fr_desc
&& wr
->fr_desc
->ind
& ISERT_PROTECTED
) {
1893 ret
= isert_check_pi_status(se_cmd
,
1894 wr
->fr_desc
->pi_ctx
->sig_mr
);
1895 wr
->fr_desc
->ind
&= ~ISERT_PROTECTED
;
1898 device
->unreg_rdma_mem(isert_cmd
, isert_conn
);
1899 wr
->rdma_wr_num
= 0;
1901 transport_send_check_condition_and_sense(se_cmd
,
1904 isert_put_response(isert_conn
->conn
, cmd
);
1908 isert_completion_rdma_read(struct iser_tx_desc
*tx_desc
,
1909 struct isert_cmd
*isert_cmd
)
1911 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1912 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1913 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1914 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1915 struct isert_device
*device
= isert_conn
->device
;
1918 if (wr
->fr_desc
&& wr
->fr_desc
->ind
& ISERT_PROTECTED
) {
1919 ret
= isert_check_pi_status(se_cmd
,
1920 wr
->fr_desc
->pi_ctx
->sig_mr
);
1921 wr
->fr_desc
->ind
&= ~ISERT_PROTECTED
;
1924 iscsit_stop_dataout_timer(cmd
);
1925 device
->unreg_rdma_mem(isert_cmd
, isert_conn
);
1926 cmd
->write_data_done
= wr
->data
.len
;
1927 wr
->rdma_wr_num
= 0;
1929 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd
);
1930 spin_lock_bh(&cmd
->istate_lock
);
1931 cmd
->cmd_flags
|= ICF_GOT_LAST_DATAOUT
;
1932 cmd
->i_state
= ISTATE_RECEIVED_LAST_DATAOUT
;
1933 spin_unlock_bh(&cmd
->istate_lock
);
1936 target_put_sess_cmd(se_cmd
);
1937 transport_send_check_condition_and_sense(se_cmd
,
1940 target_execute_cmd(se_cmd
);
1945 isert_do_control_comp(struct work_struct
*work
)
1947 struct isert_cmd
*isert_cmd
= container_of(work
,
1948 struct isert_cmd
, comp_work
);
1949 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1950 struct ib_device
*ib_dev
= isert_conn
->cm_id
->device
;
1951 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1953 isert_dbg("Cmd %p i_state %d\n", isert_cmd
, cmd
->i_state
);
1955 switch (cmd
->i_state
) {
1956 case ISTATE_SEND_TASKMGTRSP
:
1957 iscsit_tmr_post_handler(cmd
, cmd
->conn
);
1958 case ISTATE_SEND_REJECT
: /* FALLTHRU */
1959 case ISTATE_SEND_TEXTRSP
: /* FALLTHRU */
1960 cmd
->i_state
= ISTATE_SENT_STATUS
;
1961 isert_completion_put(&isert_cmd
->tx_desc
, isert_cmd
,
1964 case ISTATE_SEND_LOGOUTRSP
:
1965 iscsit_logout_post_handler(cmd
, cmd
->conn
);
1968 isert_err("Unknown i_state %d\n", cmd
->i_state
);
1975 isert_response_completion(struct iser_tx_desc
*tx_desc
,
1976 struct isert_cmd
*isert_cmd
,
1977 struct isert_conn
*isert_conn
,
1978 struct ib_device
*ib_dev
)
1980 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1982 if (cmd
->i_state
== ISTATE_SEND_TASKMGTRSP
||
1983 cmd
->i_state
== ISTATE_SEND_LOGOUTRSP
||
1984 cmd
->i_state
== ISTATE_SEND_REJECT
||
1985 cmd
->i_state
== ISTATE_SEND_TEXTRSP
) {
1986 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1988 INIT_WORK(&isert_cmd
->comp_work
, isert_do_control_comp
);
1989 queue_work(isert_comp_wq
, &isert_cmd
->comp_work
);
1993 cmd
->i_state
= ISTATE_SENT_STATUS
;
1994 isert_completion_put(tx_desc
, isert_cmd
, ib_dev
, false);
1998 isert_snd_completion(struct iser_tx_desc
*tx_desc
,
1999 struct isert_conn
*isert_conn
)
2001 struct ib_device
*ib_dev
= isert_conn
->cm_id
->device
;
2002 struct isert_cmd
*isert_cmd
= tx_desc
->isert_cmd
;
2003 struct isert_rdma_wr
*wr
;
2006 isert_unmap_tx_desc(tx_desc
, ib_dev
);
2009 wr
= &isert_cmd
->rdma_wr
;
2011 isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd
, wr
->iser_ib_op
);
2013 switch (wr
->iser_ib_op
) {
2015 isert_response_completion(tx_desc
, isert_cmd
,
2016 isert_conn
, ib_dev
);
2018 case ISER_IB_RDMA_WRITE
:
2019 isert_completion_rdma_write(tx_desc
, isert_cmd
);
2021 case ISER_IB_RDMA_READ
:
2022 isert_completion_rdma_read(tx_desc
, isert_cmd
);
2025 isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr
->iser_ib_op
);
/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
2043 is_isert_tx_desc(struct isert_conn
*isert_conn
, void *wr_id
)
2045 void *start
= isert_conn
->rx_descs
;
2046 int len
= ISERT_QP_MAX_RECV_DTOS
* sizeof(*isert_conn
->rx_descs
);
2048 if (wr_id
>= start
&& wr_id
< start
+ len
)
2055 isert_cq_comp_err(struct isert_conn
*isert_conn
, struct ib_wc
*wc
)
2057 if (wc
->wr_id
== ISER_BEACON_WRID
) {
2058 isert_info("conn %p completing wait_comp_err\n",
2060 complete(&isert_conn
->wait_comp_err
);
2061 } else if (is_isert_tx_desc(isert_conn
, (void *)(uintptr_t)wc
->wr_id
)) {
2062 struct ib_device
*ib_dev
= isert_conn
->cm_id
->device
;
2063 struct isert_cmd
*isert_cmd
;
2064 struct iser_tx_desc
*desc
;
2066 desc
= (struct iser_tx_desc
*)(uintptr_t)wc
->wr_id
;
2067 isert_cmd
= desc
->isert_cmd
;
2069 isert_unmap_tx_desc(desc
, ib_dev
);
2071 isert_completion_put(desc
, isert_cmd
, ib_dev
, true);
2073 isert_conn
->post_recv_buf_count
--;
2074 if (!isert_conn
->post_recv_buf_count
)
2075 iscsit_cause_connection_reinstatement(isert_conn
->conn
, 0);
2080 isert_handle_wc(struct ib_wc
*wc
)
2082 struct isert_conn
*isert_conn
;
2083 struct iser_tx_desc
*tx_desc
;
2084 struct iser_rx_desc
*rx_desc
;
2086 isert_conn
= wc
->qp
->qp_context
;
2087 if (likely(wc
->status
== IB_WC_SUCCESS
)) {
2088 if (wc
->opcode
== IB_WC_RECV
) {
2089 rx_desc
= (struct iser_rx_desc
*)(uintptr_t)wc
->wr_id
;
2090 isert_rcv_completion(rx_desc
, isert_conn
, wc
->byte_len
);
2092 tx_desc
= (struct iser_tx_desc
*)(uintptr_t)wc
->wr_id
;
2093 isert_snd_completion(tx_desc
, isert_conn
);
2096 if (wc
->status
!= IB_WC_WR_FLUSH_ERR
)
2097 isert_err("%s (%d): wr id %llx vend_err %x\n",
2098 ib_wc_status_msg(wc
->status
), wc
->status
,
2099 wc
->wr_id
, wc
->vendor_err
);
2101 isert_dbg("%s (%d): wr id %llx\n",
2102 ib_wc_status_msg(wc
->status
), wc
->status
,
2105 if (wc
->wr_id
!= ISER_FASTREG_LI_WRID
)
2106 isert_cq_comp_err(isert_conn
, wc
);
2111 isert_cq_work(struct work_struct
*work
)
2113 enum { isert_poll_budget
= 65536 };
2114 struct isert_comp
*comp
= container_of(work
, struct isert_comp
,
2116 struct ib_wc
*const wcs
= comp
->wcs
;
2117 int i
, n
, completed
= 0;
2119 while ((n
= ib_poll_cq(comp
->cq
, ARRAY_SIZE(comp
->wcs
), wcs
)) > 0) {
2120 for (i
= 0; i
< n
; i
++)
2121 isert_handle_wc(&wcs
[i
]);
2124 if (completed
>= isert_poll_budget
)
2128 ib_req_notify_cq(comp
->cq
, IB_CQ_NEXT_COMP
);
2132 isert_cq_callback(struct ib_cq
*cq
, void *context
)
2134 struct isert_comp
*comp
= context
;
2136 queue_work(isert_comp_wq
, &comp
->work
);
2140 isert_post_response(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
)
2142 struct ib_send_wr
*wr_failed
;
2145 ret
= isert_post_recv(isert_conn
, isert_cmd
->rx_desc
);
2147 isert_err("ib_post_recv failed with %d\n", ret
);
2151 ret
= ib_post_send(isert_conn
->qp
, &isert_cmd
->tx_desc
.send_wr
,
2154 isert_err("ib_post_send failed with %d\n", ret
);
2161 isert_put_response(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
)
2163 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
2164 struct isert_conn
*isert_conn
= conn
->context
;
2165 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
2166 struct iscsi_scsi_rsp
*hdr
= (struct iscsi_scsi_rsp
*)
2167 &isert_cmd
->tx_desc
.iscsi_header
;
2169 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
2170 iscsit_build_rsp_pdu(cmd
, conn
, true, hdr
);
2171 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
2173 * Attach SENSE DATA payload to iSCSI Response PDU
2175 if (cmd
->se_cmd
.sense_buffer
&&
2176 ((cmd
->se_cmd
.se_cmd_flags
& SCF_TRANSPORT_TASK_SENSE
) ||
2177 (cmd
->se_cmd
.se_cmd_flags
& SCF_EMULATED_TASK_SENSE
))) {
2178 struct isert_device
*device
= isert_conn
->device
;
2179 struct ib_device
*ib_dev
= device
->ib_device
;
2180 struct ib_sge
*tx_dsg
= &isert_cmd
->tx_desc
.tx_sg
[1];
2181 u32 padding
, pdu_len
;
2183 put_unaligned_be16(cmd
->se_cmd
.scsi_sense_length
,
2185 cmd
->se_cmd
.scsi_sense_length
+= sizeof(__be16
);
2187 padding
= -(cmd
->se_cmd
.scsi_sense_length
) & 3;
2188 hton24(hdr
->dlength
, (u32
)cmd
->se_cmd
.scsi_sense_length
);
2189 pdu_len
= cmd
->se_cmd
.scsi_sense_length
+ padding
;
2191 isert_cmd
->pdu_buf_dma
= ib_dma_map_single(ib_dev
,
2192 (void *)cmd
->sense_buffer
, pdu_len
,
2195 isert_cmd
->pdu_buf_len
= pdu_len
;
2196 tx_dsg
->addr
= isert_cmd
->pdu_buf_dma
;
2197 tx_dsg
->length
= pdu_len
;
2198 tx_dsg
->lkey
= device
->pd
->local_dma_lkey
;
2199 isert_cmd
->tx_desc
.num_sge
= 2;
2202 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
);
2204 isert_dbg("Posting SCSI Response\n");
2206 return isert_post_response(isert_conn
, isert_cmd
);
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= txt_rsp_len;
		tx_dsg->lkey	= device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
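
/*
 * Translate a window of the TCM scatterlist into ib_sge entries for a
 * single RDMA work request, starting at @offset and covering at most
 * @data_left bytes or isert_conn->max_sge entries, whichever is reached
 * first.  Returns the number of SGEs populated into the work request.
 */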
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	rdma_wr->wr.sg_list = ib_sge;
	rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->pd->local_dma_lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		if (!data_left)
			break;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	rdma_wr->wr.num_sge = ++i;
	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);

	return rdma_wr->wr.num_sge;
}
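
/*
 * Non-fastreg RDMA setup: DMA map the command's data scatterlist and split
 * it into a chain of RDMA READ/WRITE work requests, each carrying up to
 * max_sge SGEs (max_sge * PAGE_SIZE bytes).  For RDMA writes the last WR
 * is chained to the response send WR; for RDMA reads the last WR is
 * posted signaled.
 */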
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_rdma_wr *rdma_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num,
			      GFP_KERNEL);
	if (!wr->rdma_wr) {
		isert_dbg("Unable to allocate wr->rdma_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->rdma_wr_num; i++) {
		rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i];
		data_len = min(data_left, rdma_write_max);

		rdma_wr->wr.send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
			rdma_wr->remote_addr = isert_cmd->read_va + offset;
			rdma_wr->rkey = isert_cmd->read_stag;
			if (i + 1 == wr->rdma_wr_num)
				rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
			else
				rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
		} else {
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
			rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
			rdma_wr->rkey = isert_cmd->write_stag;
			if (i + 1 == wr->rdma_wr_num)
				rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
			else
				rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 rdma_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}
static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
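
/*
 * Register a mapped data or protection buffer through a fast registration
 * descriptor.  A single-SGE buffer is passed straight through using the
 * local DMA lkey; otherwise the descriptor's MR is (re)registered with an
 * IB_WR_REG_MR work request, preceded by a LOCAL_INV that bumps the rkey
 * when the descriptor's key has already been consumed.
 */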
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_reg_wr reg_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	int ret, n;

	if (mem->dma_nents == 1) {
		sge->lkey = device->pd->local_dma_lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID)
		/* Registering data buffer */
		mr = fr_desc->data_mr;
	else
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
	if (unlikely(n != mem->nents)) {
		isert_err("failed to map mr sg (%d/%d)\n",
			  n, mem->nents);
		return n < 0 ? n : -EINVAL;
	}

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	reg_wr.wr.next = NULL;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
	reg_wr.wr.send_flags = 0;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = mr;
	reg_wr.key = mr->lkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &reg_wr.wr;
	else
		wr->next = &reg_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = mr->iova;
	sge->length = mr->length;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}
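
/*
 * Register a signature (T10-PI) MR over the previously registered data and
 * protection SGEs using an IB_WR_REG_SIG_MR handover work request.  The
 * resulting ib_sg[SIG] entry covers data_length bytes, extended by
 * prot_length when protection guards are carried on the wire.
 */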
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_sig_handover_wr sig_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.wr.num_sge = 1;
	sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.sig_attrs = &sig_attrs;
	sig_wr.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr.wr;
	else
		wr->next = &sig_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}
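
/*
 * Prepare protection offload for a command: lazily allocate the pi_ctx on
 * the fast registration descriptor, map and fast-register the protection
 * scatterlist when one exists, then create the signature MR that covers
 * data plus protection.
 */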
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_rdma_wr *rdma_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		fr_desc = list_first_entry(&isert_conn->fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->rdma_wr_num = 1;
	memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr));
	wr->rdma_wr = &wr->s_rdma_wr;
	wr->isert_cmd = isert_cmd;

	rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr;
	rdma_wr->wr.sg_list = &wr->s_ib_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		rdma_wr->remote_addr = isert_cmd->read_va;
		rdma_wr->rkey = isert_cmd->read_stag;
		rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
					 0 : IB_SEND_SIGNALED;
	} else {
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = isert_cmd->write_va;
		rdma_wr->rkey = isert_cmd->write_stag;
		rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
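
/*
 * queue_data_in callback: post the RDMA_WRITE chain carrying Data-In to
 * the initiator.  Without PI offload the SCSI response PDU is built here
 * and chained behind the last RDMA WR so a single ib_post_send() pushes
 * both; with PI offload the write is posted signaled and the response is
 * deferred to the completion path.
 */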
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 * it to the end of the RDMA write chain.
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
		wr->rdma_wr_num += 1;

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}
	}

	rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
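
/*
 * Create, bind and listen on an RDMA CM ID for the network portal.  On any
 * failure the half-constructed ID is destroyed and an ERR_PTR is returned.
 */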
static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;
	struct iser_cm_hdr rsp_hdr;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	memset(&rsp_hdr, 0, sizeof(rsp_hdr));
	rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
	if (!isert_conn->snd_w_inv)
		rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
	cp.private_data = (void *)&rsp_hdr;
	cp.private_data_len = sizeof(rsp_hdr);

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into an effective no-op.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that we don't have hanging connections that completed
	 * RDMA establishment but didn't start the iscsi login
	 * process.  Work around this by cleaning up whatever piled
	 * up in the accepted and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	wait_for_completion(&isert_conn->wait);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}
static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}
static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->wait_comp_err);
}
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
 * @conn:    iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking on the target_wait_for_session_cmds
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}
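
/*
 * Connection teardown ordering: terminate the CM connection, wait for all
 * flush errors to drain (beacon), drop commands still waiting for
 * unsolicited data-out, wait for outstanding se_cmd references and the
 * logout completion, then hand the final release off to isert_release_wq.
 */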
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	/*
	 * Only wait for wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_wait4flush(isert_conn);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_wait4flush(isert_conn);
	isert_put_conn(isert_conn);
}
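
/*
 * iscsit_transport template wiring the iSER-specific callbacks into the
 * iSCSI target core; registered from isert_init() and unregistered on
 * module exit.
 */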
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);