2 * iSCSI Initiator over TCP/IP Data-Path
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 - 2006 Mike Christie
7 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
8 * maintained by open-iscsi@googlegroups.com
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published
12 * by the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * See the file COPYING included with this distribution for more details.
29 #include <linux/types.h>
30 #include <linux/list.h>
31 #include <linux/inet.h>
32 #include <linux/file.h>
33 #include <linux/blkdev.h>
34 #include <linux/crypto.h>
35 #include <linux/delay.h>
36 #include <linux/kfifo.h>
37 #include <linux/scatterlist.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
42 #include <scsi/scsi.h>
43 #include <scsi/scsi_transport_iscsi.h>
45 #include "iscsi_tcp.h"
47 MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
48 "Alex Aizman <itn780@yahoo.com>");
49 MODULE_DESCRIPTION("iSCSI/TCP data-path");
50 MODULE_LICENSE("GPL");
/*
 * debug_tcp - data-path trace macro.
 * Both a logging and a no-op variant appear in SOURCE; the selecting
 * preprocessor conditional was lost in extraction. Restore the
 * conventional DEBUG_TCP guard so exactly one definition is active.
 * NOTE(review): guard name reconstructed — verify against upstream.
 */
#ifdef DEBUG_TCP
#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
#else
#define debug_tcp(fmt...)
#endif
/* Maximum LUN supported per session; tunable read-only module parameter. */
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

/* Forward declaration: header-done callback used by the recv prep path. */
static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
				   struct iscsi_chunk *chunk);
74 iscsi_buf_init_iov(struct iscsi_buf
*ibuf
, char *vbuf
, int size
)
76 ibuf
->sg
.page
= virt_to_page(vbuf
);
77 ibuf
->sg
.offset
= offset_in_page(vbuf
);
78 ibuf
->sg
.length
= size
;
80 ibuf
->use_sendmsg
= 1;
84 iscsi_buf_init_sg(struct iscsi_buf
*ibuf
, struct scatterlist
*sg
)
86 ibuf
->sg
.page
= sg
->page
;
87 ibuf
->sg
.offset
= sg
->offset
;
88 ibuf
->sg
.length
= sg
->length
;
90 * Fastpath: sg element fits into single page
92 if (sg
->length
+ sg
->offset
<= PAGE_SIZE
&& !PageSlab(sg
->page
))
93 ibuf
->use_sendmsg
= 0;
95 ibuf
->use_sendmsg
= 1;
100 iscsi_buf_left(struct iscsi_buf
*ibuf
)
104 rc
= ibuf
->sg
.length
- ibuf
->sent
;
110 iscsi_hdr_digest(struct iscsi_conn
*conn
, struct iscsi_buf
*buf
,
113 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
115 crypto_hash_digest(&tcp_conn
->tx_hash
, &buf
->sg
, buf
->sg
.length
, crc
);
116 buf
->sg
.length
+= ISCSI_DIGEST_SIZE
;
120 * Scatterlist handling: inside the iscsi_chunk, we
121 * remember an index into the scatterlist, and set data/size
122 * to the current scatterlist entry. For highmem pages, we
125 * Note that the page is unmapped when we return from
126 * TCP's data_ready handler, so we may end up mapping and
127 * unmapping the same page repeatedly. The whole reason
128 * for this is that we shouldn't keep the page mapped
129 * outside the softirq.
133 * iscsi_tcp_chunk_init_sg - init indicated scatterlist entry
134 * @chunk: the buffer object
135 * @idx: index into scatterlist
136 * @offset: byte offset into that sg entry
138 * This function sets up the chunk so that subsequent
139 * data is copied to the indicated sg entry, at the given
143 iscsi_tcp_chunk_init_sg(struct iscsi_chunk
*chunk
,
144 unsigned int idx
, unsigned int offset
)
146 struct scatterlist
*sg
;
148 BUG_ON(chunk
->sg
== NULL
);
150 sg
= &chunk
->sg
[idx
];
151 chunk
->sg_index
= idx
;
152 chunk
->sg_offset
= offset
;
153 chunk
->size
= min(sg
->length
- offset
, chunk
->total_size
);
158 * iscsi_tcp_chunk_map - map the current S/G page
159 * @chunk: iscsi chunk
161 * We only need to possibly kmap data if scatter lists are being used,
162 * because the iscsi passthrough and internal IO paths will never use high
166 iscsi_tcp_chunk_map(struct iscsi_chunk
*chunk
)
168 struct scatterlist
*sg
;
170 if (chunk
->data
!= NULL
|| !chunk
->sg
)
173 sg
= &chunk
->sg
[chunk
->sg_index
];
174 BUG_ON(chunk
->sg_mapped
);
175 BUG_ON(sg
->length
== 0);
176 chunk
->sg_mapped
= kmap_atomic(sg
->page
, KM_SOFTIRQ0
);
177 chunk
->data
= chunk
->sg_mapped
+ sg
->offset
+ chunk
->sg_offset
;
181 iscsi_tcp_chunk_unmap(struct iscsi_chunk
*chunk
)
183 if (chunk
->sg_mapped
) {
184 kunmap_atomic(chunk
->sg_mapped
, KM_SOFTIRQ0
);
185 chunk
->sg_mapped
= NULL
;
191 * Splice the digest buffer into the buffer
194 iscsi_tcp_chunk_splice_digest(struct iscsi_chunk
*chunk
, void *digest
)
196 chunk
->data
= digest
;
197 chunk
->digest_len
= ISCSI_DIGEST_SIZE
;
198 chunk
->total_size
+= ISCSI_DIGEST_SIZE
;
199 chunk
->size
= ISCSI_DIGEST_SIZE
;
207 * iscsi_tcp_chunk_done - check whether the chunk is complete
208 * @chunk: iscsi chunk to check
210 * Check if we're done receiving this chunk. If the receive
211 * buffer is full but we expect more data, move on to the
212 * next entry in the scatterlist.
214 * If the amount of data we received isn't a multiple of 4,
215 * we will transparently receive the pad bytes, too.
217 * This function must be re-entrant.
220 iscsi_tcp_chunk_done(struct iscsi_chunk
*chunk
)
222 static unsigned char padbuf
[ISCSI_PAD_LEN
];
225 if (chunk
->copied
< chunk
->size
) {
226 iscsi_tcp_chunk_map(chunk
);
230 chunk
->total_copied
+= chunk
->copied
;
234 /* Unmap the current scatterlist page, if there is one. */
235 iscsi_tcp_chunk_unmap(chunk
);
237 /* Do we have more scatterlist entries? */
238 if (chunk
->total_copied
< chunk
->total_size
) {
239 /* Proceed to the next entry in the scatterlist. */
240 iscsi_tcp_chunk_init_sg(chunk
, chunk
->sg_index
+ 1, 0);
241 iscsi_tcp_chunk_map(chunk
);
242 BUG_ON(chunk
->size
== 0);
246 /* Do we need to handle padding? */
247 pad
= iscsi_padding(chunk
->total_copied
);
249 debug_tcp("consume %d pad bytes\n", pad
);
250 chunk
->total_size
+= pad
;
252 chunk
->data
= padbuf
;
257 * Set us up for receiving the data digest. hdr digest
258 * is completely handled in hdr done function.
261 if (chunk
->digest_len
== 0) {
262 crypto_hash_final(chunk
->hash
, chunk
->digest
);
263 iscsi_tcp_chunk_splice_digest(chunk
,
273 * iscsi_tcp_chunk_recv - copy data to chunk
274 * @tcp_conn: the iSCSI TCP connection
275 * @chunk: the buffer to copy to
277 * @len: amount of data available
279 * This function copies up to @len bytes to the
280 * given buffer, and returns the number of bytes
281 * consumed, which can actually be less than @len.
283 * If hash digest is enabled, the function will update the
284 * hash while copying.
285 * Combining these two operations doesn't buy us a lot (yet),
286 * but in the future we could implement combined copy+crc,
287 * just way we do for network layer checksums.
290 iscsi_tcp_chunk_recv(struct iscsi_tcp_conn
*tcp_conn
,
291 struct iscsi_chunk
*chunk
, const void *ptr
,
294 struct scatterlist sg
;
295 unsigned int copy
, copied
= 0;
297 while (!iscsi_tcp_chunk_done(chunk
)) {
301 copy
= min(len
- copied
, chunk
->size
- chunk
->copied
);
302 memcpy(chunk
->data
+ chunk
->copied
, ptr
+ copied
, copy
);
305 sg_init_one(&sg
, ptr
+ copied
, copy
);
306 crypto_hash_update(chunk
->hash
, &sg
, copy
);
308 chunk
->copied
+= copy
;
317 iscsi_tcp_dgst_header(struct hash_desc
*hash
, const void *hdr
, size_t hdrlen
,
318 unsigned char digest
[ISCSI_DIGEST_SIZE
])
320 struct scatterlist sg
;
322 sg_init_one(&sg
, hdr
, hdrlen
);
323 crypto_hash_digest(hash
, &sg
, hdrlen
, digest
);
327 iscsi_tcp_dgst_verify(struct iscsi_tcp_conn
*tcp_conn
,
328 struct iscsi_chunk
*chunk
)
330 if (!chunk
->digest_len
)
333 if (memcmp(chunk
->recv_digest
, chunk
->digest
, chunk
->digest_len
)) {
334 debug_scsi("digest mismatch\n");
342 * Helper function to set up chunk buffer
345 __iscsi_chunk_init(struct iscsi_chunk
*chunk
, size_t size
,
346 iscsi_chunk_done_fn_t
*done
, struct hash_desc
*hash
)
348 memset(chunk
, 0, sizeof(*chunk
));
349 chunk
->total_size
= size
;
354 crypto_hash_init(hash
);
359 iscsi_chunk_init_linear(struct iscsi_chunk
*chunk
, void *data
, size_t size
,
360 iscsi_chunk_done_fn_t
*done
, struct hash_desc
*hash
)
362 __iscsi_chunk_init(chunk
, size
, done
, hash
);
368 iscsi_chunk_seek_sg(struct iscsi_chunk
*chunk
,
369 struct scatterlist
*sg
, unsigned int sg_count
,
370 unsigned int offset
, size_t size
,
371 iscsi_chunk_done_fn_t
*done
, struct hash_desc
*hash
)
375 __iscsi_chunk_init(chunk
, size
, done
, hash
);
376 for (i
= 0; i
< sg_count
; ++i
) {
377 if (offset
< sg
[i
].length
) {
379 chunk
->sg_count
= sg_count
;
380 iscsi_tcp_chunk_init_sg(chunk
, i
, offset
);
383 offset
-= sg
[i
].length
;
386 return ISCSI_ERR_DATA_OFFSET
;
390 * iscsi_tcp_hdr_recv_prep - prep chunk for hdr reception
391 * @tcp_conn: iscsi connection to prep for
393 * This function always passes NULL for the hash argument, because when this
394 * function is called we do not yet know the final size of the header and want
395 * to delay the digest processing until we know that.
398 iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn
*tcp_conn
)
400 debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn
,
401 tcp_conn
->iscsi_conn
->hdrdgst_en
? ", digest enabled" : "");
402 iscsi_chunk_init_linear(&tcp_conn
->in
.chunk
,
403 tcp_conn
->in
.hdr_buf
, sizeof(struct iscsi_hdr
),
404 iscsi_tcp_hdr_recv_done
, NULL
);
408 * Handle incoming reply to any other type of command
411 iscsi_tcp_data_recv_done(struct iscsi_tcp_conn
*tcp_conn
,
412 struct iscsi_chunk
*chunk
)
414 struct iscsi_conn
*conn
= tcp_conn
->iscsi_conn
;
417 if (!iscsi_tcp_dgst_verify(tcp_conn
, chunk
))
418 return ISCSI_ERR_DATA_DGST
;
420 rc
= iscsi_complete_pdu(conn
, tcp_conn
->in
.hdr
,
421 conn
->data
, tcp_conn
->in
.datalen
);
425 iscsi_tcp_hdr_recv_prep(tcp_conn
);
430 iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn
*tcp_conn
)
432 struct iscsi_conn
*conn
= tcp_conn
->iscsi_conn
;
433 struct hash_desc
*rx_hash
= NULL
;
435 if (conn
->datadgst_en
)
436 rx_hash
= &tcp_conn
->rx_hash
;
438 iscsi_chunk_init_linear(&tcp_conn
->in
.chunk
,
439 conn
->data
, tcp_conn
->in
.datalen
,
440 iscsi_tcp_data_recv_done
, rx_hash
);
444 * must be called with session lock
447 iscsi_tcp_cleanup_ctask(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
449 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
450 struct iscsi_r2t_info
*r2t
;
451 struct scsi_cmnd
*sc
;
453 /* flush ctask's r2t queues */
454 while (__kfifo_get(tcp_ctask
->r2tqueue
, (void*)&r2t
, sizeof(void*))) {
455 __kfifo_put(tcp_ctask
->r2tpool
.queue
, (void*)&r2t
,
457 debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
464 tcp_ctask
->xmstate
= XMSTATE_IDLE
;
465 tcp_ctask
->r2t
= NULL
;
469 * iscsi_data_rsp - SCSI Data-In Response processing
470 * @conn: iscsi connection
471 * @ctask: scsi command task
474 iscsi_data_rsp(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
476 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
477 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
478 struct iscsi_data_rsp
*rhdr
= (struct iscsi_data_rsp
*)tcp_conn
->in
.hdr
;
479 struct iscsi_session
*session
= conn
->session
;
480 struct scsi_cmnd
*sc
= ctask
->sc
;
481 int datasn
= be32_to_cpu(rhdr
->datasn
);
483 iscsi_update_cmdsn(session
, (struct iscsi_nopin
*)rhdr
);
485 * setup Data-In byte counter (gets decremented..)
487 ctask
->data_count
= tcp_conn
->in
.datalen
;
489 if (tcp_conn
->in
.datalen
== 0)
492 if (tcp_ctask
->exp_datasn
!= datasn
) {
493 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
494 __FUNCTION__
, tcp_ctask
->exp_datasn
, datasn
);
495 return ISCSI_ERR_DATASN
;
498 tcp_ctask
->exp_datasn
++;
500 tcp_ctask
->data_offset
= be32_to_cpu(rhdr
->offset
);
501 if (tcp_ctask
->data_offset
+ tcp_conn
->in
.datalen
> scsi_bufflen(sc
)) {
502 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
503 __FUNCTION__
, tcp_ctask
->data_offset
,
504 tcp_conn
->in
.datalen
, scsi_bufflen(sc
));
505 return ISCSI_ERR_DATA_OFFSET
;
508 if (rhdr
->flags
& ISCSI_FLAG_DATA_STATUS
) {
509 sc
->result
= (DID_OK
<< 16) | rhdr
->cmd_status
;
510 conn
->exp_statsn
= be32_to_cpu(rhdr
->statsn
) + 1;
511 if (rhdr
->flags
& (ISCSI_FLAG_DATA_UNDERFLOW
|
512 ISCSI_FLAG_DATA_OVERFLOW
)) {
513 int res_count
= be32_to_cpu(rhdr
->residual_count
);
516 (rhdr
->flags
& ISCSI_FLAG_CMD_OVERFLOW
||
517 res_count
<= scsi_bufflen(sc
)))
518 scsi_set_resid(sc
, res_count
);
520 sc
->result
= (DID_BAD_TARGET
<< 16) |
525 conn
->datain_pdus_cnt
++;
530 * iscsi_solicit_data_init - initialize first Data-Out
531 * @conn: iscsi connection
532 * @ctask: scsi command task
536 * Initialize first Data-Out within this R2T sequence and finds
537 * proper data_offset within this SCSI command.
539 * This function is called with connection lock taken.
542 iscsi_solicit_data_init(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
,
543 struct iscsi_r2t_info
*r2t
)
545 struct iscsi_data
*hdr
;
546 struct scsi_cmnd
*sc
= ctask
->sc
;
548 struct scatterlist
*sg
;
550 hdr
= &r2t
->dtask
.hdr
;
551 memset(hdr
, 0, sizeof(struct iscsi_data
));
553 hdr
->datasn
= cpu_to_be32(r2t
->solicit_datasn
);
554 r2t
->solicit_datasn
++;
555 hdr
->opcode
= ISCSI_OP_SCSI_DATA_OUT
;
556 memcpy(hdr
->lun
, ctask
->hdr
->lun
, sizeof(hdr
->lun
));
557 hdr
->itt
= ctask
->hdr
->itt
;
558 hdr
->exp_statsn
= r2t
->exp_statsn
;
559 hdr
->offset
= cpu_to_be32(r2t
->data_offset
);
560 if (r2t
->data_length
> conn
->max_xmit_dlength
) {
561 hton24(hdr
->dlength
, conn
->max_xmit_dlength
);
562 r2t
->data_count
= conn
->max_xmit_dlength
;
565 hton24(hdr
->dlength
, r2t
->data_length
);
566 r2t
->data_count
= r2t
->data_length
;
567 hdr
->flags
= ISCSI_FLAG_CMD_FINAL
;
569 conn
->dataout_pdus_cnt
++;
573 iscsi_buf_init_iov(&r2t
->headbuf
, (char*)hdr
,
574 sizeof(struct iscsi_hdr
));
576 sg
= scsi_sglist(sc
);
578 for (i
= 0; i
< scsi_sg_count(sc
); i
++, sg
+= 1) {
579 /* FIXME: prefetch ? */
580 if (sg_count
+ sg
->length
> r2t
->data_offset
) {
585 /* offset within this page */
586 page_offset
= r2t
->data_offset
- sg_count
;
588 /* fill in this buffer */
589 iscsi_buf_init_sg(&r2t
->sendbuf
, sg
);
590 r2t
->sendbuf
.sg
.offset
+= page_offset
;
591 r2t
->sendbuf
.sg
.length
-= page_offset
;
593 /* xmit logic will continue with next one */
597 sg_count
+= sg
->length
;
599 BUG_ON(r2t
->sg
== NULL
);
603 * iscsi_r2t_rsp - iSCSI R2T Response processing
604 * @conn: iscsi connection
605 * @ctask: scsi command task
608 iscsi_r2t_rsp(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
610 struct iscsi_r2t_info
*r2t
;
611 struct iscsi_session
*session
= conn
->session
;
612 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
613 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
614 struct iscsi_r2t_rsp
*rhdr
= (struct iscsi_r2t_rsp
*)tcp_conn
->in
.hdr
;
615 int r2tsn
= be32_to_cpu(rhdr
->r2tsn
);
618 if (tcp_conn
->in
.datalen
) {
619 printk(KERN_ERR
"iscsi_tcp: invalid R2t with datalen %d\n",
620 tcp_conn
->in
.datalen
);
621 return ISCSI_ERR_DATALEN
;
624 if (tcp_ctask
->exp_datasn
!= r2tsn
){
625 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
626 __FUNCTION__
, tcp_ctask
->exp_datasn
, r2tsn
);
627 return ISCSI_ERR_R2TSN
;
630 /* fill-in new R2T associated with the task */
631 spin_lock(&session
->lock
);
632 iscsi_update_cmdsn(session
, (struct iscsi_nopin
*)rhdr
);
634 if (!ctask
->sc
|| session
->state
!= ISCSI_STATE_LOGGED_IN
) {
635 printk(KERN_INFO
"iscsi_tcp: dropping R2T itt %d in "
636 "recovery...\n", ctask
->itt
);
637 spin_unlock(&session
->lock
);
641 rc
= __kfifo_get(tcp_ctask
->r2tpool
.queue
, (void*)&r2t
, sizeof(void*));
644 r2t
->exp_statsn
= rhdr
->statsn
;
645 r2t
->data_length
= be32_to_cpu(rhdr
->data_length
);
646 if (r2t
->data_length
== 0) {
647 printk(KERN_ERR
"iscsi_tcp: invalid R2T with zero data len\n");
648 spin_unlock(&session
->lock
);
649 return ISCSI_ERR_DATALEN
;
652 if (r2t
->data_length
> session
->max_burst
)
653 debug_scsi("invalid R2T with data len %u and max burst %u."
654 "Attempting to execute request.\n",
655 r2t
->data_length
, session
->max_burst
);
657 r2t
->data_offset
= be32_to_cpu(rhdr
->data_offset
);
658 if (r2t
->data_offset
+ r2t
->data_length
> scsi_bufflen(ctask
->sc
)) {
659 spin_unlock(&session
->lock
);
660 printk(KERN_ERR
"iscsi_tcp: invalid R2T with data len %u at "
661 "offset %u and total length %d\n", r2t
->data_length
,
662 r2t
->data_offset
, scsi_bufflen(ctask
->sc
));
663 return ISCSI_ERR_DATALEN
;
666 r2t
->ttt
= rhdr
->ttt
; /* no flip */
667 r2t
->solicit_datasn
= 0;
669 iscsi_solicit_data_init(conn
, ctask
, r2t
);
671 tcp_ctask
->exp_datasn
= r2tsn
+ 1;
672 __kfifo_put(tcp_ctask
->r2tqueue
, (void*)&r2t
, sizeof(void*));
673 tcp_ctask
->xmstate
|= XMSTATE_SOL_HDR_INIT
;
674 conn
->r2t_pdus_cnt
++;
676 iscsi_requeue_ctask(ctask
);
677 spin_unlock(&session
->lock
);
683 * Handle incoming reply to DataIn command
686 iscsi_tcp_process_data_in(struct iscsi_tcp_conn
*tcp_conn
,
687 struct iscsi_chunk
*chunk
)
689 struct iscsi_conn
*conn
= tcp_conn
->iscsi_conn
;
690 struct iscsi_hdr
*hdr
= tcp_conn
->in
.hdr
;
693 if (!iscsi_tcp_dgst_verify(tcp_conn
, chunk
))
694 return ISCSI_ERR_DATA_DGST
;
696 /* check for non-exceptional status */
697 if (hdr
->flags
& ISCSI_FLAG_DATA_STATUS
) {
698 rc
= iscsi_complete_pdu(conn
, tcp_conn
->in
.hdr
, NULL
, 0);
703 iscsi_tcp_hdr_recv_prep(tcp_conn
);
708 * iscsi_tcp_hdr_dissect - process PDU header
709 * @conn: iSCSI connection
712 * This function analyzes the header of the PDU received,
713 * and performs several sanity checks. If the PDU is accompanied
714 * by data, the receive buffer is set up to copy the incoming data
715 * to the correct location.
718 iscsi_tcp_hdr_dissect(struct iscsi_conn
*conn
, struct iscsi_hdr
*hdr
)
720 int rc
= 0, opcode
, ahslen
;
721 struct iscsi_session
*session
= conn
->session
;
722 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
723 struct iscsi_cmd_task
*ctask
;
726 /* verify PDU length */
727 tcp_conn
->in
.datalen
= ntoh24(hdr
->dlength
);
728 if (tcp_conn
->in
.datalen
> conn
->max_recv_dlength
) {
729 printk(KERN_ERR
"iscsi_tcp: datalen %d > %d\n",
730 tcp_conn
->in
.datalen
, conn
->max_recv_dlength
);
731 return ISCSI_ERR_DATALEN
;
734 /* Additional header segments. So far, we don't
735 * process additional headers.
737 ahslen
= hdr
->hlength
<< 2;
739 opcode
= hdr
->opcode
& ISCSI_OPCODE_MASK
;
740 /* verify itt (itt encoding: age+cid+itt) */
741 rc
= iscsi_verify_itt(conn
, hdr
, &itt
);
742 if (rc
== ISCSI_ERR_NO_SCSI_CMD
) {
743 /* XXX: what does this do? */
744 tcp_conn
->in
.datalen
= 0; /* force drop */
749 debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
750 opcode
, ahslen
, tcp_conn
->in
.datalen
);
753 case ISCSI_OP_SCSI_DATA_IN
:
754 ctask
= session
->cmds
[itt
];
755 rc
= iscsi_data_rsp(conn
, ctask
);
758 if (tcp_conn
->in
.datalen
) {
759 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
760 struct hash_desc
*rx_hash
= NULL
;
763 * Setup copy of Data-In into the Scsi_Cmnd
765 * We set up the iscsi_chunk to point to the next
766 * scatterlist entry to copy to. As we go along,
767 * we move on to the next scatterlist entry and
768 * update the digest per-entry.
770 if (conn
->datadgst_en
)
771 rx_hash
= &tcp_conn
->rx_hash
;
773 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
774 "datalen=%d)\n", tcp_conn
,
775 tcp_ctask
->data_offset
,
776 tcp_conn
->in
.datalen
);
777 return iscsi_chunk_seek_sg(&tcp_conn
->in
.chunk
,
778 scsi_sglist(ctask
->sc
),
779 scsi_sg_count(ctask
->sc
),
780 tcp_ctask
->data_offset
,
781 tcp_conn
->in
.datalen
,
782 iscsi_tcp_process_data_in
,
786 case ISCSI_OP_SCSI_CMD_RSP
:
787 if (tcp_conn
->in
.datalen
) {
788 iscsi_tcp_data_recv_prep(tcp_conn
);
791 rc
= iscsi_complete_pdu(conn
, hdr
, NULL
, 0);
794 ctask
= session
->cmds
[itt
];
796 rc
= ISCSI_ERR_AHSLEN
;
797 else if (ctask
->sc
->sc_data_direction
== DMA_TO_DEVICE
)
798 rc
= iscsi_r2t_rsp(conn
, ctask
);
800 rc
= ISCSI_ERR_PROTO
;
802 case ISCSI_OP_LOGIN_RSP
:
803 case ISCSI_OP_TEXT_RSP
:
804 case ISCSI_OP_REJECT
:
805 case ISCSI_OP_ASYNC_EVENT
:
807 * It is possible that we could get a PDU with a buffer larger
808 * than 8K, but there are no targets that currently do this.
809 * For now we fail until we find a vendor that needs it
811 if (ISCSI_DEF_MAX_RECV_SEG_LEN
< tcp_conn
->in
.datalen
) {
812 printk(KERN_ERR
"iscsi_tcp: received buffer of len %u "
813 "but conn buffer is only %u (opcode %0x)\n",
814 tcp_conn
->in
.datalen
,
815 ISCSI_DEF_MAX_RECV_SEG_LEN
, opcode
);
816 rc
= ISCSI_ERR_PROTO
;
820 /* If there's data coming in with the response,
821 * receive it to the connection's buffer.
823 if (tcp_conn
->in
.datalen
) {
824 iscsi_tcp_data_recv_prep(tcp_conn
);
828 case ISCSI_OP_LOGOUT_RSP
:
829 case ISCSI_OP_NOOP_IN
:
830 case ISCSI_OP_SCSI_TMFUNC_RSP
:
831 rc
= iscsi_complete_pdu(conn
, hdr
, NULL
, 0);
834 rc
= ISCSI_ERR_BAD_OPCODE
;
839 /* Anything that comes with data should have
840 * been handled above. */
841 if (tcp_conn
->in
.datalen
)
842 return ISCSI_ERR_PROTO
;
843 iscsi_tcp_hdr_recv_prep(tcp_conn
);
850 partial_sg_digest_update(struct hash_desc
*desc
, struct scatterlist
*sg
,
851 int offset
, int length
)
853 struct scatterlist temp
;
855 sg_init_table(&temp
, 1);
856 sg_set_page(&temp
, sg_page(sg
), length
, offset
);
857 crypto_hash_update(desc
, &temp
, length
);
861 * iscsi_tcp_hdr_recv_done - process PDU header
863 * This is the callback invoked when the PDU header has
864 * been received. If the header is followed by additional
865 * header segments, we go back for more data.
868 iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn
*tcp_conn
,
869 struct iscsi_chunk
*chunk
)
871 struct iscsi_conn
*conn
= tcp_conn
->iscsi_conn
;
872 struct iscsi_hdr
*hdr
;
874 /* Check if there are additional header segments
875 * *prior* to computing the digest, because we
876 * may need to go back to the caller for more.
878 hdr
= (struct iscsi_hdr
*) tcp_conn
->in
.hdr_buf
;
879 if (chunk
->copied
== sizeof(struct iscsi_hdr
) && hdr
->hlength
) {
880 /* Bump the header length - the caller will
881 * just loop around and get the AHS for us, and
883 unsigned int ahslen
= hdr
->hlength
<< 2;
885 /* Make sure we don't overflow */
886 if (sizeof(*hdr
) + ahslen
> sizeof(tcp_conn
->in
.hdr_buf
))
887 return ISCSI_ERR_AHSLEN
;
889 chunk
->total_size
+= ahslen
;
890 chunk
->size
+= ahslen
;
894 /* We're done processing the header. See if we're doing
895 * header digests; if so, set up the recv_digest buffer
896 * and go back for more. */
897 if (conn
->hdrdgst_en
) {
898 if (chunk
->digest_len
== 0) {
899 iscsi_tcp_chunk_splice_digest(chunk
,
903 iscsi_tcp_dgst_header(&tcp_conn
->rx_hash
, hdr
,
904 chunk
->total_copied
- ISCSI_DIGEST_SIZE
,
907 if (!iscsi_tcp_dgst_verify(tcp_conn
, chunk
))
908 return ISCSI_ERR_HDR_DGST
;
911 tcp_conn
->in
.hdr
= hdr
;
912 return iscsi_tcp_hdr_dissect(conn
, hdr
);
916 * iscsi_tcp_recv - TCP receive in sendfile fashion
917 * @rd_desc: read descriptor
918 * @skb: socket buffer
919 * @offset: offset in skb
920 * @len: skb->len - offset
923 iscsi_tcp_recv(read_descriptor_t
*rd_desc
, struct sk_buff
*skb
,
924 unsigned int offset
, size_t len
)
926 struct iscsi_conn
*conn
= rd_desc
->arg
.data
;
927 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
928 struct iscsi_chunk
*chunk
= &tcp_conn
->in
.chunk
;
929 struct skb_seq_state seq
;
930 unsigned int consumed
= 0;
933 debug_tcp("in %d bytes\n", skb
->len
- offset
);
935 if (unlikely(conn
->suspend_rx
)) {
936 debug_tcp("conn %d Rx suspended!\n", conn
->id
);
940 skb_prepare_seq_read(skb
, offset
, skb
->len
, &seq
);
945 avail
= skb_seq_read(consumed
, &ptr
, &seq
);
948 BUG_ON(chunk
->copied
>= chunk
->size
);
950 debug_tcp("skb %p ptr=%p avail=%u\n", skb
, ptr
, avail
);
951 rc
= iscsi_tcp_chunk_recv(tcp_conn
, chunk
, ptr
, avail
);
955 if (chunk
->total_copied
>= chunk
->total_size
) {
956 rc
= chunk
->done(tcp_conn
, chunk
);
958 skb_abort_seq_read(&seq
);
962 /* The done() functions sets up the
967 conn
->rxdata_octets
+= consumed
;
971 debug_tcp("Error receiving PDU, errno=%d\n", rc
);
972 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
977 iscsi_tcp_data_ready(struct sock
*sk
, int flag
)
979 struct iscsi_conn
*conn
= sk
->sk_user_data
;
980 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
981 read_descriptor_t rd_desc
;
983 read_lock(&sk
->sk_callback_lock
);
986 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
987 * We set count to 1 because we want the network layer to
988 * hand us all the skbs that are available. iscsi_tcp_recv
989 * handled pdus that cross buffers or pdus that still need data.
991 rd_desc
.arg
.data
= conn
;
993 tcp_read_sock(sk
, &rd_desc
, iscsi_tcp_recv
);
995 read_unlock(&sk
->sk_callback_lock
);
997 /* If we had to (atomically) map a highmem page,
999 iscsi_tcp_chunk_unmap(&tcp_conn
->in
.chunk
);
1003 iscsi_tcp_state_change(struct sock
*sk
)
1005 struct iscsi_tcp_conn
*tcp_conn
;
1006 struct iscsi_conn
*conn
;
1007 struct iscsi_session
*session
;
1008 void (*old_state_change
)(struct sock
*);
1010 read_lock(&sk
->sk_callback_lock
);
1012 conn
= (struct iscsi_conn
*)sk
->sk_user_data
;
1013 session
= conn
->session
;
1015 if ((sk
->sk_state
== TCP_CLOSE_WAIT
||
1016 sk
->sk_state
== TCP_CLOSE
) &&
1017 !atomic_read(&sk
->sk_rmem_alloc
)) {
1018 debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
1019 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
1022 tcp_conn
= conn
->dd_data
;
1023 old_state_change
= tcp_conn
->old_state_change
;
1025 read_unlock(&sk
->sk_callback_lock
);
1027 old_state_change(sk
);
1031 * iscsi_write_space - Called when more output buffer space is available
1032 * @sk: socket space is available for
1035 iscsi_write_space(struct sock
*sk
)
1037 struct iscsi_conn
*conn
= (struct iscsi_conn
*)sk
->sk_user_data
;
1038 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1040 tcp_conn
->old_write_space(sk
);
1041 debug_tcp("iscsi_write_space: cid %d\n", conn
->id
);
1042 scsi_queue_work(conn
->session
->host
, &conn
->xmitwork
);
1046 iscsi_conn_set_callbacks(struct iscsi_conn
*conn
)
1048 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1049 struct sock
*sk
= tcp_conn
->sock
->sk
;
1051 /* assign new callbacks */
1052 write_lock_bh(&sk
->sk_callback_lock
);
1053 sk
->sk_user_data
= conn
;
1054 tcp_conn
->old_data_ready
= sk
->sk_data_ready
;
1055 tcp_conn
->old_state_change
= sk
->sk_state_change
;
1056 tcp_conn
->old_write_space
= sk
->sk_write_space
;
1057 sk
->sk_data_ready
= iscsi_tcp_data_ready
;
1058 sk
->sk_state_change
= iscsi_tcp_state_change
;
1059 sk
->sk_write_space
= iscsi_write_space
;
1060 write_unlock_bh(&sk
->sk_callback_lock
);
1064 iscsi_conn_restore_callbacks(struct iscsi_tcp_conn
*tcp_conn
)
1066 struct sock
*sk
= tcp_conn
->sock
->sk
;
1068 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1069 write_lock_bh(&sk
->sk_callback_lock
);
1070 sk
->sk_user_data
= NULL
;
1071 sk
->sk_data_ready
= tcp_conn
->old_data_ready
;
1072 sk
->sk_state_change
= tcp_conn
->old_state_change
;
1073 sk
->sk_write_space
= tcp_conn
->old_write_space
;
1074 sk
->sk_no_check
= 0;
1075 write_unlock_bh(&sk
->sk_callback_lock
);
1079 * iscsi_send - generic send routine
1080 * @sk: kernel's socket
1081 * @buf: buffer to write from
1082 * @size: actual size to write
1083 * @flags: socket's flags
1086 iscsi_send(struct iscsi_conn
*conn
, struct iscsi_buf
*buf
, int size
, int flags
)
1088 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1089 struct socket
*sk
= tcp_conn
->sock
;
1090 int offset
= buf
->sg
.offset
+ buf
->sent
, res
;
1093 * if we got use_sg=0 or are sending something we kmallocd
1094 * then we did not have to do kmap (kmap returns page_address)
1096 * if we got use_sg > 0, but had to drop down, we do not
1097 * set clustering so this should only happen for that
1100 if (buf
->use_sendmsg
)
1101 res
= sock_no_sendpage(sk
, buf
->sg
.page
, offset
, size
, flags
);
1103 res
= tcp_conn
->sendpage(sk
, buf
->sg
.page
, offset
, size
, flags
);
1106 conn
->txdata_octets
+= res
;
1111 tcp_conn
->sendpage_failures_cnt
++;
1115 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
1120 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
1121 * @conn: iscsi connection
1122 * @buf: buffer to write from
1123 * @datalen: lenght of data to be sent after the header
1129 iscsi_sendhdr(struct iscsi_conn
*conn
, struct iscsi_buf
*buf
, int datalen
)
1131 int flags
= 0; /* MSG_DONTWAIT; */
1134 size
= buf
->sg
.length
- buf
->sent
;
1135 BUG_ON(buf
->sent
+ size
> buf
->sg
.length
);
1136 if (buf
->sent
+ size
!= buf
->sg
.length
|| datalen
)
1139 res
= iscsi_send(conn
, buf
, size
, flags
);
1140 debug_tcp("sendhdr %d bytes, sent %d res %d\n", size
, buf
->sent
, res
);
1151 * iscsi_sendpage - send one page of iSCSI Data-Out.
1152 * @conn: iscsi connection
1153 * @buf: buffer to write from
1154 * @count: remaining data
1155 * @sent: number of bytes sent
1161 iscsi_sendpage(struct iscsi_conn
*conn
, struct iscsi_buf
*buf
,
1162 int *count
, int *sent
)
1164 int flags
= 0; /* MSG_DONTWAIT; */
1167 size
= buf
->sg
.length
- buf
->sent
;
1168 BUG_ON(buf
->sent
+ size
> buf
->sg
.length
);
1171 if (buf
->sent
+ size
!= buf
->sg
.length
|| *count
!= size
)
1174 res
= iscsi_send(conn
, buf
, size
, flags
);
1175 debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
1176 size
, buf
->sent
, *count
, *sent
, res
);
1189 iscsi_data_digest_init(struct iscsi_tcp_conn
*tcp_conn
,
1190 struct iscsi_tcp_cmd_task
*tcp_ctask
)
1192 crypto_hash_init(&tcp_conn
->tx_hash
);
1193 tcp_ctask
->digest_count
= 4;
1197 * iscsi_solicit_data_cont - initialize next Data-Out
1198 * @conn: iscsi connection
1199 * @ctask: scsi command task
1201 * @left: bytes left to transfer
1204 * Initialize next Data-Out within this R2T sequence and continue
1205 * to process next Scatter-Gather element(if any) of this SCSI command.
1207 * Called under connection lock.
1210 iscsi_solicit_data_cont(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
,
1211 struct iscsi_r2t_info
*r2t
, int left
)
1213 struct iscsi_data
*hdr
;
1216 hdr
= &r2t
->dtask
.hdr
;
1217 memset(hdr
, 0, sizeof(struct iscsi_data
));
1218 hdr
->ttt
= r2t
->ttt
;
1219 hdr
->datasn
= cpu_to_be32(r2t
->solicit_datasn
);
1220 r2t
->solicit_datasn
++;
1221 hdr
->opcode
= ISCSI_OP_SCSI_DATA_OUT
;
1222 memcpy(hdr
->lun
, ctask
->hdr
->lun
, sizeof(hdr
->lun
));
1223 hdr
->itt
= ctask
->hdr
->itt
;
1224 hdr
->exp_statsn
= r2t
->exp_statsn
;
1225 new_offset
= r2t
->data_offset
+ r2t
->sent
;
1226 hdr
->offset
= cpu_to_be32(new_offset
);
1227 if (left
> conn
->max_xmit_dlength
) {
1228 hton24(hdr
->dlength
, conn
->max_xmit_dlength
);
1229 r2t
->data_count
= conn
->max_xmit_dlength
;
1231 hton24(hdr
->dlength
, left
);
1232 r2t
->data_count
= left
;
1233 hdr
->flags
= ISCSI_FLAG_CMD_FINAL
;
1235 conn
->dataout_pdus_cnt
++;
1237 iscsi_buf_init_iov(&r2t
->headbuf
, (char*)hdr
,
1238 sizeof(struct iscsi_hdr
));
1240 if (iscsi_buf_left(&r2t
->sendbuf
))
1243 iscsi_buf_init_sg(&r2t
->sendbuf
, r2t
->sg
);
1247 static void iscsi_set_padding(struct iscsi_tcp_cmd_task
*tcp_ctask
,
1250 tcp_ctask
->pad_count
= len
& (ISCSI_PAD_LEN
- 1);
1251 if (!tcp_ctask
->pad_count
)
1254 tcp_ctask
->pad_count
= ISCSI_PAD_LEN
- tcp_ctask
->pad_count
;
1255 debug_scsi("write padding %d bytes\n", tcp_ctask
->pad_count
);
1256 tcp_ctask
->xmstate
|= XMSTATE_W_PAD
;
1260 * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1261 * @conn: iscsi connection
1262 * @ctask: scsi command task
1266 iscsi_tcp_cmd_init(struct iscsi_cmd_task
*ctask
)
1268 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1270 BUG_ON(__kfifo_len(tcp_ctask
->r2tqueue
));
1271 tcp_ctask
->xmstate
= XMSTATE_CMD_HDR_INIT
;
1275 * iscsi_tcp_mtask_xmit - xmit management(immediate) task
1276 * @conn: iscsi connection
1277 * @mtask: task management task
1280 * The function can return -EAGAIN in which case caller must
1281 * call it again later, or recover. '0' return code means successful
1284 * Management xmit state machine consists of these states:
1285 * XMSTATE_IMM_HDR_INIT - calculate digest of PDU Header
1286 * XMSTATE_IMM_HDR - PDU Header xmit in progress
1287 * XMSTATE_IMM_DATA - PDU Data xmit in progress
1288 * XMSTATE_IDLE - management PDU is done
1291 iscsi_tcp_mtask_xmit(struct iscsi_conn
*conn
, struct iscsi_mgmt_task
*mtask
)
1293 struct iscsi_tcp_mgmt_task
*tcp_mtask
= mtask
->dd_data
;
1296 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1297 conn
->id
, tcp_mtask
->xmstate
, mtask
->itt
);
1299 if (tcp_mtask
->xmstate
& XMSTATE_IMM_HDR_INIT
) {
1300 iscsi_buf_init_iov(&tcp_mtask
->headbuf
, (char*)mtask
->hdr
,
1301 sizeof(struct iscsi_hdr
));
1303 if (mtask
->data_count
) {
1304 tcp_mtask
->xmstate
|= XMSTATE_IMM_DATA
;
1305 iscsi_buf_init_iov(&tcp_mtask
->sendbuf
,
1310 if (conn
->c_stage
!= ISCSI_CONN_INITIAL_STAGE
&&
1311 conn
->stop_stage
!= STOP_CONN_RECOVER
&&
1313 iscsi_hdr_digest(conn
, &tcp_mtask
->headbuf
,
1314 (u8
*)tcp_mtask
->hdrext
);
1316 tcp_mtask
->sent
= 0;
1317 tcp_mtask
->xmstate
&= ~XMSTATE_IMM_HDR_INIT
;
1318 tcp_mtask
->xmstate
|= XMSTATE_IMM_HDR
;
1321 if (tcp_mtask
->xmstate
& XMSTATE_IMM_HDR
) {
1322 rc
= iscsi_sendhdr(conn
, &tcp_mtask
->headbuf
,
1326 tcp_mtask
->xmstate
&= ~XMSTATE_IMM_HDR
;
1329 if (tcp_mtask
->xmstate
& XMSTATE_IMM_DATA
) {
1330 BUG_ON(!mtask
->data_count
);
1331 tcp_mtask
->xmstate
&= ~XMSTATE_IMM_DATA
;
1332 /* FIXME: implement.
1333 * Virtual buffer could be spreaded across multiple pages...
1338 rc
= iscsi_sendpage(conn
, &tcp_mtask
->sendbuf
,
1339 &mtask
->data_count
, &tcp_mtask
->sent
);
1341 tcp_mtask
->xmstate
|= XMSTATE_IMM_DATA
;
1344 } while (mtask
->data_count
);
1347 BUG_ON(tcp_mtask
->xmstate
!= XMSTATE_IDLE
);
1348 if (mtask
->hdr
->itt
== RESERVED_ITT
) {
1349 struct iscsi_session
*session
= conn
->session
;
1351 spin_lock_bh(&session
->lock
);
1352 list_del(&conn
->mtask
->running
);
1353 __kfifo_put(session
->mgmtpool
.queue
, (void*)&conn
->mtask
,
1355 spin_unlock_bh(&session
->lock
);
1361 iscsi_send_cmd_hdr(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1363 struct scsi_cmnd
*sc
= ctask
->sc
;
1364 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1367 if (tcp_ctask
->xmstate
& XMSTATE_CMD_HDR_INIT
) {
1368 tcp_ctask
->sent
= 0;
1369 tcp_ctask
->sg_count
= 0;
1370 tcp_ctask
->exp_datasn
= 0;
1372 if (sc
->sc_data_direction
== DMA_TO_DEVICE
) {
1373 struct scatterlist
*sg
= scsi_sglist(sc
);
1375 iscsi_buf_init_sg(&tcp_ctask
->sendbuf
, sg
);
1376 tcp_ctask
->sg
= sg
+ 1;
1377 tcp_ctask
->bad_sg
= sg
+ scsi_sg_count(sc
);
1379 debug_scsi("cmd [itt 0x%x total %d imm_data %d "
1380 "unsol count %d, unsol offset %d]\n",
1381 ctask
->itt
, scsi_bufflen(sc
),
1382 ctask
->imm_count
, ctask
->unsol_count
,
1383 ctask
->unsol_offset
);
1386 iscsi_buf_init_iov(&tcp_ctask
->headbuf
, (char*)ctask
->hdr
,
1389 if (conn
->hdrdgst_en
)
1390 iscsi_hdr_digest(conn
, &tcp_ctask
->headbuf
,
1391 iscsi_next_hdr(ctask
));
1392 tcp_ctask
->xmstate
&= ~XMSTATE_CMD_HDR_INIT
;
1393 tcp_ctask
->xmstate
|= XMSTATE_CMD_HDR_XMIT
;
1396 if (tcp_ctask
->xmstate
& XMSTATE_CMD_HDR_XMIT
) {
1397 rc
= iscsi_sendhdr(conn
, &tcp_ctask
->headbuf
, ctask
->imm_count
);
1400 tcp_ctask
->xmstate
&= ~XMSTATE_CMD_HDR_XMIT
;
1402 if (sc
->sc_data_direction
!= DMA_TO_DEVICE
)
1405 if (ctask
->imm_count
) {
1406 tcp_ctask
->xmstate
|= XMSTATE_IMM_DATA
;
1407 iscsi_set_padding(tcp_ctask
, ctask
->imm_count
);
1409 if (ctask
->conn
->datadgst_en
) {
1410 iscsi_data_digest_init(ctask
->conn
->dd_data
,
1412 tcp_ctask
->immdigest
= 0;
1416 if (ctask
->unsol_count
)
1417 tcp_ctask
->xmstate
|=
1418 XMSTATE_UNS_HDR
| XMSTATE_UNS_INIT
;
1424 iscsi_send_padding(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1426 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1427 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1430 if (tcp_ctask
->xmstate
& XMSTATE_W_PAD
) {
1431 iscsi_buf_init_iov(&tcp_ctask
->sendbuf
, (char*)&tcp_ctask
->pad
,
1432 tcp_ctask
->pad_count
);
1433 if (conn
->datadgst_en
)
1434 crypto_hash_update(&tcp_conn
->tx_hash
,
1435 &tcp_ctask
->sendbuf
.sg
,
1436 tcp_ctask
->sendbuf
.sg
.length
);
1437 } else if (!(tcp_ctask
->xmstate
& XMSTATE_W_RESEND_PAD
))
1440 tcp_ctask
->xmstate
&= ~XMSTATE_W_PAD
;
1441 tcp_ctask
->xmstate
&= ~XMSTATE_W_RESEND_PAD
;
1442 debug_scsi("sending %d pad bytes for itt 0x%x\n",
1443 tcp_ctask
->pad_count
, ctask
->itt
);
1444 rc
= iscsi_sendpage(conn
, &tcp_ctask
->sendbuf
, &tcp_ctask
->pad_count
,
1447 debug_scsi("padding send failed %d\n", rc
);
1448 tcp_ctask
->xmstate
|= XMSTATE_W_RESEND_PAD
;
1454 iscsi_send_digest(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
,
1455 struct iscsi_buf
*buf
, uint32_t *digest
)
1457 struct iscsi_tcp_cmd_task
*tcp_ctask
;
1458 struct iscsi_tcp_conn
*tcp_conn
;
1461 if (!conn
->datadgst_en
)
1464 tcp_ctask
= ctask
->dd_data
;
1465 tcp_conn
= conn
->dd_data
;
1467 if (!(tcp_ctask
->xmstate
& XMSTATE_W_RESEND_DATA_DIGEST
)) {
1468 crypto_hash_final(&tcp_conn
->tx_hash
, (u8
*)digest
);
1469 iscsi_buf_init_iov(buf
, (char*)digest
, 4);
1471 tcp_ctask
->xmstate
&= ~XMSTATE_W_RESEND_DATA_DIGEST
;
1473 rc
= iscsi_sendpage(conn
, buf
, &tcp_ctask
->digest_count
, &sent
);
1475 debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest
,
1478 debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
1479 *digest
, ctask
->itt
);
1480 tcp_ctask
->xmstate
|= XMSTATE_W_RESEND_DATA_DIGEST
;
1486 iscsi_send_data(struct iscsi_cmd_task
*ctask
, struct iscsi_buf
*sendbuf
,
1487 struct scatterlist
**sg
, int *sent
, int *count
,
1488 struct iscsi_buf
*digestbuf
, uint32_t *digest
)
1490 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1491 struct iscsi_conn
*conn
= ctask
->conn
;
1492 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1493 int rc
, buf_sent
, offset
;
1497 offset
= sendbuf
->sent
;
1499 rc
= iscsi_sendpage(conn
, sendbuf
, count
, &buf_sent
);
1500 *sent
= *sent
+ buf_sent
;
1501 if (buf_sent
&& conn
->datadgst_en
)
1502 partial_sg_digest_update(&tcp_conn
->tx_hash
,
1503 &sendbuf
->sg
, sendbuf
->sg
.offset
+ offset
,
1505 if (!iscsi_buf_left(sendbuf
) && *sg
!= tcp_ctask
->bad_sg
) {
1506 iscsi_buf_init_sg(sendbuf
, *sg
);
1514 rc
= iscsi_send_padding(conn
, ctask
);
1518 return iscsi_send_digest(conn
, ctask
, digestbuf
, digest
);
1522 iscsi_send_unsol_hdr(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1524 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1525 struct iscsi_data_task
*dtask
;
1528 tcp_ctask
->xmstate
|= XMSTATE_UNS_DATA
;
1529 if (tcp_ctask
->xmstate
& XMSTATE_UNS_INIT
) {
1530 dtask
= &tcp_ctask
->unsol_dtask
;
1532 iscsi_prep_unsolicit_data_pdu(ctask
, &dtask
->hdr
);
1533 iscsi_buf_init_iov(&tcp_ctask
->headbuf
, (char*)&dtask
->hdr
,
1534 sizeof(struct iscsi_hdr
));
1535 if (conn
->hdrdgst_en
)
1536 iscsi_hdr_digest(conn
, &tcp_ctask
->headbuf
,
1537 (u8
*)dtask
->hdrext
);
1539 tcp_ctask
->xmstate
&= ~XMSTATE_UNS_INIT
;
1540 iscsi_set_padding(tcp_ctask
, ctask
->data_count
);
1543 rc
= iscsi_sendhdr(conn
, &tcp_ctask
->headbuf
, ctask
->data_count
);
1545 tcp_ctask
->xmstate
&= ~XMSTATE_UNS_DATA
;
1546 tcp_ctask
->xmstate
|= XMSTATE_UNS_HDR
;
1550 if (conn
->datadgst_en
) {
1551 dtask
= &tcp_ctask
->unsol_dtask
;
1552 iscsi_data_digest_init(ctask
->conn
->dd_data
, tcp_ctask
);
1556 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1557 ctask
->itt
, ctask
->unsol_count
, tcp_ctask
->sent
);
1562 iscsi_send_unsol_pdu(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1564 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1567 if (tcp_ctask
->xmstate
& XMSTATE_UNS_HDR
) {
1568 BUG_ON(!ctask
->unsol_count
);
1569 tcp_ctask
->xmstate
&= ~XMSTATE_UNS_HDR
;
1571 rc
= iscsi_send_unsol_hdr(conn
, ctask
);
1576 if (tcp_ctask
->xmstate
& XMSTATE_UNS_DATA
) {
1577 struct iscsi_data_task
*dtask
= &tcp_ctask
->unsol_dtask
;
1578 int start
= tcp_ctask
->sent
;
1580 rc
= iscsi_send_data(ctask
, &tcp_ctask
->sendbuf
, &tcp_ctask
->sg
,
1581 &tcp_ctask
->sent
, &ctask
->data_count
,
1582 &dtask
->digestbuf
, &dtask
->digest
);
1583 ctask
->unsol_count
-= tcp_ctask
->sent
- start
;
1586 tcp_ctask
->xmstate
&= ~XMSTATE_UNS_DATA
;
1588 * Done with the Data-Out. Next, check if we need
1589 * to send another unsolicited Data-Out.
1591 if (ctask
->unsol_count
) {
1592 debug_scsi("sending more uns\n");
1593 tcp_ctask
->xmstate
|= XMSTATE_UNS_INIT
;
1600 static int iscsi_send_sol_pdu(struct iscsi_conn
*conn
,
1601 struct iscsi_cmd_task
*ctask
)
1603 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1604 struct iscsi_session
*session
= conn
->session
;
1605 struct iscsi_r2t_info
*r2t
;
1606 struct iscsi_data_task
*dtask
;
1609 if (tcp_ctask
->xmstate
& XMSTATE_SOL_HDR_INIT
) {
1610 if (!tcp_ctask
->r2t
) {
1611 spin_lock_bh(&session
->lock
);
1612 __kfifo_get(tcp_ctask
->r2tqueue
, (void*)&tcp_ctask
->r2t
,
1614 spin_unlock_bh(&session
->lock
);
1617 r2t
= tcp_ctask
->r2t
;
1618 dtask
= &r2t
->dtask
;
1620 if (conn
->hdrdgst_en
)
1621 iscsi_hdr_digest(conn
, &r2t
->headbuf
,
1622 (u8
*)dtask
->hdrext
);
1623 tcp_ctask
->xmstate
&= ~XMSTATE_SOL_HDR_INIT
;
1624 tcp_ctask
->xmstate
|= XMSTATE_SOL_HDR
;
1627 if (tcp_ctask
->xmstate
& XMSTATE_SOL_HDR
) {
1628 r2t
= tcp_ctask
->r2t
;
1629 dtask
= &r2t
->dtask
;
1631 rc
= iscsi_sendhdr(conn
, &r2t
->headbuf
, r2t
->data_count
);
1634 tcp_ctask
->xmstate
&= ~XMSTATE_SOL_HDR
;
1635 tcp_ctask
->xmstate
|= XMSTATE_SOL_DATA
;
1637 if (conn
->datadgst_en
) {
1638 iscsi_data_digest_init(conn
->dd_data
, tcp_ctask
);
1642 iscsi_set_padding(tcp_ctask
, r2t
->data_count
);
1643 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
1644 r2t
->solicit_datasn
- 1, ctask
->itt
, r2t
->data_count
,
1648 if (tcp_ctask
->xmstate
& XMSTATE_SOL_DATA
) {
1649 r2t
= tcp_ctask
->r2t
;
1650 dtask
= &r2t
->dtask
;
1652 rc
= iscsi_send_data(ctask
, &r2t
->sendbuf
, &r2t
->sg
,
1653 &r2t
->sent
, &r2t
->data_count
,
1654 &dtask
->digestbuf
, &dtask
->digest
);
1657 tcp_ctask
->xmstate
&= ~XMSTATE_SOL_DATA
;
1660 * Done with this Data-Out. Next, check if we have
1661 * to send another Data-Out for this R2T.
1663 BUG_ON(r2t
->data_length
- r2t
->sent
< 0);
1664 left
= r2t
->data_length
- r2t
->sent
;
1666 iscsi_solicit_data_cont(conn
, ctask
, r2t
, left
);
1671 * Done with this R2T. Check if there are more
1672 * outstanding R2Ts ready to be processed.
1674 spin_lock_bh(&session
->lock
);
1675 tcp_ctask
->r2t
= NULL
;
1676 __kfifo_put(tcp_ctask
->r2tpool
.queue
, (void*)&r2t
,
1678 if (__kfifo_get(tcp_ctask
->r2tqueue
, (void*)&r2t
,
1680 tcp_ctask
->r2t
= r2t
;
1681 spin_unlock_bh(&session
->lock
);
1684 spin_unlock_bh(&session
->lock
);
1690 * iscsi_tcp_ctask_xmit - xmit normal PDU task
1691 * @conn: iscsi connection
1692 * @ctask: iscsi command task
1695 * The function can return -EAGAIN in which case caller must
1696 * call it again later, or recover. '0' return code means successful
1698 * The function is devided to logical helpers (above) for the different
1701 *iscsi_send_cmd_hdr()
1702 * XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers Calculate
1704 * XMSTATE_CMD_HDR_XMIT - Transmit header in progress
1707 * XMSTATE_W_PAD - Prepare and send pading
1708 * XMSTATE_W_RESEND_PAD - retry send pading
1711 * XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
1712 * XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest
1714 *iscsi_send_unsol_hdr
1715 * XMSTATE_UNS_INIT - prepare un-solicit data header and digest
1716 * XMSTATE_UNS_HDR - send un-solicit header
1718 *iscsi_send_unsol_pdu
1719 * XMSTATE_UNS_DATA - send un-solicit data in progress
1722 * XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize
1723 * XMSTATE_SOL_HDR - send solicit header
1724 * XMSTATE_SOL_DATA - send solicit data
1726 *iscsi_tcp_ctask_xmit
1727 * XMSTATE_IMM_DATA - xmit managment data (??)
1730 iscsi_tcp_ctask_xmit(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1732 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1735 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
1736 conn
->id
, tcp_ctask
->xmstate
, ctask
->itt
);
1738 rc
= iscsi_send_cmd_hdr(conn
, ctask
);
1741 if (ctask
->sc
->sc_data_direction
!= DMA_TO_DEVICE
)
1744 if (tcp_ctask
->xmstate
& XMSTATE_IMM_DATA
) {
1745 rc
= iscsi_send_data(ctask
, &tcp_ctask
->sendbuf
, &tcp_ctask
->sg
,
1746 &tcp_ctask
->sent
, &ctask
->imm_count
,
1747 &tcp_ctask
->immbuf
, &tcp_ctask
->immdigest
);
1750 tcp_ctask
->xmstate
&= ~XMSTATE_IMM_DATA
;
1753 rc
= iscsi_send_unsol_pdu(conn
, ctask
);
1757 rc
= iscsi_send_sol_pdu(conn
, ctask
);
1764 static struct iscsi_cls_conn
*
1765 iscsi_tcp_conn_create(struct iscsi_cls_session
*cls_session
, uint32_t conn_idx
)
1767 struct iscsi_conn
*conn
;
1768 struct iscsi_cls_conn
*cls_conn
;
1769 struct iscsi_tcp_conn
*tcp_conn
;
1771 cls_conn
= iscsi_conn_setup(cls_session
, conn_idx
);
1774 conn
= cls_conn
->dd_data
;
1776 * due to strange issues with iser these are not set
1777 * in iscsi_conn_setup
1779 conn
->max_recv_dlength
= ISCSI_DEF_MAX_RECV_SEG_LEN
;
1781 tcp_conn
= kzalloc(sizeof(*tcp_conn
), GFP_KERNEL
);
1783 goto tcp_conn_alloc_fail
;
1785 conn
->dd_data
= tcp_conn
;
1786 tcp_conn
->iscsi_conn
= conn
;
1788 tcp_conn
->tx_hash
.tfm
= crypto_alloc_hash("crc32c", 0,
1790 tcp_conn
->tx_hash
.flags
= 0;
1791 if (IS_ERR(tcp_conn
->tx_hash
.tfm
)) {
1792 printk(KERN_ERR
"Could not create connection due to crc32c "
1793 "loading error %ld. Make sure the crc32c module is "
1794 "built as a module or into the kernel\n",
1795 PTR_ERR(tcp_conn
->tx_hash
.tfm
));
1799 tcp_conn
->rx_hash
.tfm
= crypto_alloc_hash("crc32c", 0,
1801 tcp_conn
->rx_hash
.flags
= 0;
1802 if (IS_ERR(tcp_conn
->rx_hash
.tfm
)) {
1803 printk(KERN_ERR
"Could not create connection due to crc32c "
1804 "loading error %ld. Make sure the crc32c module is "
1805 "built as a module or into the kernel\n",
1806 PTR_ERR(tcp_conn
->rx_hash
.tfm
));
1813 crypto_free_hash(tcp_conn
->tx_hash
.tfm
);
1816 tcp_conn_alloc_fail
:
1817 iscsi_conn_teardown(cls_conn
);
1822 iscsi_tcp_release_conn(struct iscsi_conn
*conn
)
1824 struct iscsi_session
*session
= conn
->session
;
1825 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1826 struct socket
*sock
= tcp_conn
->sock
;
1831 sock_hold(sock
->sk
);
1832 iscsi_conn_restore_callbacks(tcp_conn
);
1835 spin_lock_bh(&session
->lock
);
1836 tcp_conn
->sock
= NULL
;
1837 conn
->recv_lock
= NULL
;
1838 spin_unlock_bh(&session
->lock
);
1843 iscsi_tcp_conn_destroy(struct iscsi_cls_conn
*cls_conn
)
1845 struct iscsi_conn
*conn
= cls_conn
->dd_data
;
1846 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1848 iscsi_tcp_release_conn(conn
);
1849 iscsi_conn_teardown(cls_conn
);
1851 if (tcp_conn
->tx_hash
.tfm
)
1852 crypto_free_hash(tcp_conn
->tx_hash
.tfm
);
1853 if (tcp_conn
->rx_hash
.tfm
)
1854 crypto_free_hash(tcp_conn
->rx_hash
.tfm
);
1860 iscsi_tcp_conn_stop(struct iscsi_cls_conn
*cls_conn
, int flag
)
1862 struct iscsi_conn
*conn
= cls_conn
->dd_data
;
1864 iscsi_conn_stop(cls_conn
, flag
);
1865 iscsi_tcp_release_conn(conn
);
1868 static int iscsi_tcp_get_addr(struct iscsi_conn
*conn
, struct socket
*sock
,
1869 char *buf
, int *port
,
1870 int (*getname
)(struct socket
*, struct sockaddr
*,
1873 struct sockaddr_storage
*addr
;
1874 struct sockaddr_in6
*sin6
;
1875 struct sockaddr_in
*sin
;
1878 addr
= kmalloc(sizeof(*addr
), GFP_KERNEL
);
1882 if (getname(sock
, (struct sockaddr
*) addr
, &len
)) {
1887 switch (addr
->ss_family
) {
1889 sin
= (struct sockaddr_in
*)addr
;
1890 spin_lock_bh(&conn
->session
->lock
);
1891 sprintf(buf
, NIPQUAD_FMT
, NIPQUAD(sin
->sin_addr
.s_addr
));
1892 *port
= be16_to_cpu(sin
->sin_port
);
1893 spin_unlock_bh(&conn
->session
->lock
);
1896 sin6
= (struct sockaddr_in6
*)addr
;
1897 spin_lock_bh(&conn
->session
->lock
);
1898 sprintf(buf
, NIP6_FMT
, NIP6(sin6
->sin6_addr
));
1899 *port
= be16_to_cpu(sin6
->sin6_port
);
1900 spin_unlock_bh(&conn
->session
->lock
);
1909 iscsi_tcp_conn_bind(struct iscsi_cls_session
*cls_session
,
1910 struct iscsi_cls_conn
*cls_conn
, uint64_t transport_eph
,
1913 struct iscsi_conn
*conn
= cls_conn
->dd_data
;
1914 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1916 struct socket
*sock
;
1919 /* lookup for existing socket */
1920 sock
= sockfd_lookup((int)transport_eph
, &err
);
1922 printk(KERN_ERR
"iscsi_tcp: sockfd_lookup failed %d\n", err
);
1926 * copy these values now because if we drop the session
1927 * userspace may still want to query the values since we will
1928 * be using them for the reconnect
1930 err
= iscsi_tcp_get_addr(conn
, sock
, conn
->portal_address
,
1931 &conn
->portal_port
, kernel_getpeername
);
1935 err
= iscsi_tcp_get_addr(conn
, sock
, conn
->local_address
,
1936 &conn
->local_port
, kernel_getsockname
);
1940 err
= iscsi_conn_bind(cls_session
, cls_conn
, is_leading
);
1944 /* bind iSCSI connection and socket */
1945 tcp_conn
->sock
= sock
;
1947 /* setup Socket parameters */
1950 sk
->sk_sndtimeo
= 15 * HZ
; /* FIXME: make it configurable */
1951 sk
->sk_allocation
= GFP_ATOMIC
;
1953 /* FIXME: disable Nagle's algorithm */
1956 * Intercept TCP callbacks for sendfile like receive
1959 conn
->recv_lock
= &sk
->sk_callback_lock
;
1960 iscsi_conn_set_callbacks(conn
);
1961 tcp_conn
->sendpage
= tcp_conn
->sock
->ops
->sendpage
;
1963 * set receive state machine into initial state
1965 iscsi_tcp_hdr_recv_prep(tcp_conn
);
1973 /* called with host lock */
1975 iscsi_tcp_mgmt_init(struct iscsi_conn
*conn
, struct iscsi_mgmt_task
*mtask
)
1977 struct iscsi_tcp_mgmt_task
*tcp_mtask
= mtask
->dd_data
;
1978 tcp_mtask
->xmstate
= XMSTATE_IMM_HDR_INIT
;
1982 iscsi_r2tpool_alloc(struct iscsi_session
*session
)
1988 * initialize per-task: R2T pool and xmit queue
1990 for (cmd_i
= 0; cmd_i
< session
->cmds_max
; cmd_i
++) {
1991 struct iscsi_cmd_task
*ctask
= session
->cmds
[cmd_i
];
1992 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
1995 * pre-allocated x4 as much r2ts to handle race when
1996 * target acks DataOut faster than we data_xmit() queues
1997 * could replenish r2tqueue.
2001 if (iscsi_pool_init(&tcp_ctask
->r2tpool
, session
->max_r2t
* 4, NULL
,
2002 sizeof(struct iscsi_r2t_info
))) {
2003 goto r2t_alloc_fail
;
2006 /* R2T xmit queue */
2007 tcp_ctask
->r2tqueue
= kfifo_alloc(
2008 session
->max_r2t
* 4 * sizeof(void*), GFP_KERNEL
, NULL
);
2009 if (tcp_ctask
->r2tqueue
== ERR_PTR(-ENOMEM
)) {
2010 iscsi_pool_free(&tcp_ctask
->r2tpool
);
2011 goto r2t_alloc_fail
;
2018 for (i
= 0; i
< cmd_i
; i
++) {
2019 struct iscsi_cmd_task
*ctask
= session
->cmds
[i
];
2020 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
2022 kfifo_free(tcp_ctask
->r2tqueue
);
2023 iscsi_pool_free(&tcp_ctask
->r2tpool
);
2029 iscsi_r2tpool_free(struct iscsi_session
*session
)
2033 for (i
= 0; i
< session
->cmds_max
; i
++) {
2034 struct iscsi_cmd_task
*ctask
= session
->cmds
[i
];
2035 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
2037 kfifo_free(tcp_ctask
->r2tqueue
);
2038 iscsi_pool_free(&tcp_ctask
->r2tpool
);
2043 iscsi_conn_set_param(struct iscsi_cls_conn
*cls_conn
, enum iscsi_param param
,
2044 char *buf
, int buflen
)
2046 struct iscsi_conn
*conn
= cls_conn
->dd_data
;
2047 struct iscsi_session
*session
= conn
->session
;
2048 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
2052 case ISCSI_PARAM_HDRDGST_EN
:
2053 iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2055 case ISCSI_PARAM_DATADGST_EN
:
2056 iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2057 tcp_conn
->sendpage
= conn
->datadgst_en
?
2058 sock_no_sendpage
: tcp_conn
->sock
->ops
->sendpage
;
2060 case ISCSI_PARAM_MAX_R2T
:
2061 sscanf(buf
, "%d", &value
);
2062 if (session
->max_r2t
== roundup_pow_of_two(value
))
2064 iscsi_r2tpool_free(session
);
2065 iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2066 if (session
->max_r2t
& (session
->max_r2t
- 1))
2067 session
->max_r2t
= roundup_pow_of_two(session
->max_r2t
);
2068 if (iscsi_r2tpool_alloc(session
))
2072 return iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2079 iscsi_tcp_conn_get_param(struct iscsi_cls_conn
*cls_conn
,
2080 enum iscsi_param param
, char *buf
)
2082 struct iscsi_conn
*conn
= cls_conn
->dd_data
;
2086 case ISCSI_PARAM_CONN_PORT
:
2087 spin_lock_bh(&conn
->session
->lock
);
2088 len
= sprintf(buf
, "%hu\n", conn
->portal_port
);
2089 spin_unlock_bh(&conn
->session
->lock
);
2091 case ISCSI_PARAM_CONN_ADDRESS
:
2092 spin_lock_bh(&conn
->session
->lock
);
2093 len
= sprintf(buf
, "%s\n", conn
->portal_address
);
2094 spin_unlock_bh(&conn
->session
->lock
);
2097 return iscsi_conn_get_param(cls_conn
, param
, buf
);
2104 iscsi_tcp_host_get_param(struct Scsi_Host
*shost
, enum iscsi_host_param param
,
2107 struct iscsi_session
*session
= iscsi_hostdata(shost
->hostdata
);
2111 case ISCSI_HOST_PARAM_IPADDRESS
:
2112 spin_lock_bh(&session
->lock
);
2113 if (!session
->leadconn
)
2116 len
= sprintf(buf
, "%s\n",
2117 session
->leadconn
->local_address
);
2118 spin_unlock_bh(&session
->lock
);
2121 return iscsi_host_get_param(shost
, param
, buf
);
2127 iscsi_conn_get_stats(struct iscsi_cls_conn
*cls_conn
, struct iscsi_stats
*stats
)
2129 struct iscsi_conn
*conn
= cls_conn
->dd_data
;
2130 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
2132 stats
->txdata_octets
= conn
->txdata_octets
;
2133 stats
->rxdata_octets
= conn
->rxdata_octets
;
2134 stats
->scsicmd_pdus
= conn
->scsicmd_pdus_cnt
;
2135 stats
->dataout_pdus
= conn
->dataout_pdus_cnt
;
2136 stats
->scsirsp_pdus
= conn
->scsirsp_pdus_cnt
;
2137 stats
->datain_pdus
= conn
->datain_pdus_cnt
;
2138 stats
->r2t_pdus
= conn
->r2t_pdus_cnt
;
2139 stats
->tmfcmd_pdus
= conn
->tmfcmd_pdus_cnt
;
2140 stats
->tmfrsp_pdus
= conn
->tmfrsp_pdus_cnt
;
2141 stats
->custom_length
= 3;
2142 strcpy(stats
->custom
[0].desc
, "tx_sendpage_failures");
2143 stats
->custom
[0].value
= tcp_conn
->sendpage_failures_cnt
;
2144 strcpy(stats
->custom
[1].desc
, "rx_discontiguous_hdr");
2145 stats
->custom
[1].value
= tcp_conn
->discontiguous_hdr_cnt
;
2146 strcpy(stats
->custom
[2].desc
, "eh_abort_cnt");
2147 stats
->custom
[2].value
= conn
->eh_abort_cnt
;
2150 static struct iscsi_cls_session
*
2151 iscsi_tcp_session_create(struct iscsi_transport
*iscsit
,
2152 struct scsi_transport_template
*scsit
,
2153 uint16_t cmds_max
, uint16_t qdepth
,
2154 uint32_t initial_cmdsn
, uint32_t *hostno
)
2156 struct iscsi_cls_session
*cls_session
;
2157 struct iscsi_session
*session
;
2161 cls_session
= iscsi_session_setup(iscsit
, scsit
, cmds_max
, qdepth
,
2162 sizeof(struct iscsi_tcp_cmd_task
),
2163 sizeof(struct iscsi_tcp_mgmt_task
),
2164 initial_cmdsn
, &hn
);
2169 session
= class_to_transport_session(cls_session
);
2170 for (cmd_i
= 0; cmd_i
< session
->cmds_max
; cmd_i
++) {
2171 struct iscsi_cmd_task
*ctask
= session
->cmds
[cmd_i
];
2172 struct iscsi_tcp_cmd_task
*tcp_ctask
= ctask
->dd_data
;
2174 ctask
->hdr
= &tcp_ctask
->hdr
.cmd_hdr
;
2175 ctask
->hdr_max
= sizeof(tcp_ctask
->hdr
) - ISCSI_DIGEST_SIZE
;
2178 for (cmd_i
= 0; cmd_i
< session
->mgmtpool_max
; cmd_i
++) {
2179 struct iscsi_mgmt_task
*mtask
= session
->mgmt_cmds
[cmd_i
];
2180 struct iscsi_tcp_mgmt_task
*tcp_mtask
= mtask
->dd_data
;
2182 mtask
->hdr
= &tcp_mtask
->hdr
;
2185 if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session
)))
2186 goto r2tpool_alloc_fail
;
2191 iscsi_session_teardown(cls_session
);
/*
 * iscsi_tcp_session_destroy - inverse of iscsi_tcp_session_create():
 * free the R2T pools, then tear down the generic session.
 */
static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	iscsi_r2tpool_free(class_to_transport_session(cls_session));
	iscsi_session_teardown(cls_session);
}
2201 static int iscsi_tcp_slave_configure(struct scsi_device
*sdev
)
2203 blk_queue_bounce_limit(sdev
->request_queue
, BLK_BOUNCE_ANY
);
2204 blk_queue_dma_alignment(sdev
->request_queue
, 0);
2208 static struct scsi_host_template iscsi_sht
= {
2209 .module
= THIS_MODULE
,
2210 .name
= "iSCSI Initiator over TCP/IP",
2211 .queuecommand
= iscsi_queuecommand
,
2212 .change_queue_depth
= iscsi_change_queue_depth
,
2213 .can_queue
= ISCSI_DEF_XMIT_CMDS_MAX
- 1,
2214 .sg_tablesize
= ISCSI_SG_TABLESIZE
,
2215 .max_sectors
= 0xFFFF,
2216 .cmd_per_lun
= ISCSI_DEF_CMD_PER_LUN
,
2217 .eh_abort_handler
= iscsi_eh_abort
,
2218 .eh_device_reset_handler
= iscsi_eh_device_reset
,
2219 .eh_host_reset_handler
= iscsi_eh_host_reset
,
2220 .use_clustering
= DISABLE_CLUSTERING
,
2221 .slave_configure
= iscsi_tcp_slave_configure
,
2222 .proc_name
= "iscsi_tcp",
2226 static struct iscsi_transport iscsi_tcp_transport
= {
2227 .owner
= THIS_MODULE
,
2229 .caps
= CAP_RECOVERY_L0
| CAP_MULTI_R2T
| CAP_HDRDGST
2231 .param_mask
= ISCSI_MAX_RECV_DLENGTH
|
2232 ISCSI_MAX_XMIT_DLENGTH
|
2235 ISCSI_INITIAL_R2T_EN
|
2240 ISCSI_PDU_INORDER_EN
|
2241 ISCSI_DATASEQ_INORDER_EN
|
2244 ISCSI_CONN_ADDRESS
|
2246 ISCSI_PERSISTENT_PORT
|
2247 ISCSI_PERSISTENT_ADDRESS
|
2248 ISCSI_TARGET_NAME
| ISCSI_TPGT
|
2249 ISCSI_USERNAME
| ISCSI_PASSWORD
|
2250 ISCSI_USERNAME_IN
| ISCSI_PASSWORD_IN
|
2252 .host_param_mask
= ISCSI_HOST_HWADDRESS
| ISCSI_HOST_IPADDRESS
|
2253 ISCSI_HOST_INITIATOR_NAME
|
2254 ISCSI_HOST_NETDEV_NAME
,
2255 .host_template
= &iscsi_sht
,
2256 .conndata_size
= sizeof(struct iscsi_conn
),
2258 .max_cmd_len
= ISCSI_TCP_MAX_CMD_LEN
,
2259 /* session management */
2260 .create_session
= iscsi_tcp_session_create
,
2261 .destroy_session
= iscsi_tcp_session_destroy
,
2262 /* connection management */
2263 .create_conn
= iscsi_tcp_conn_create
,
2264 .bind_conn
= iscsi_tcp_conn_bind
,
2265 .destroy_conn
= iscsi_tcp_conn_destroy
,
2266 .set_param
= iscsi_conn_set_param
,
2267 .get_conn_param
= iscsi_tcp_conn_get_param
,
2268 .get_session_param
= iscsi_session_get_param
,
2269 .start_conn
= iscsi_conn_start
,
2270 .stop_conn
= iscsi_tcp_conn_stop
,
2271 /* iscsi host params */
2272 .get_host_param
= iscsi_tcp_host_get_param
,
2273 .set_host_param
= iscsi_host_set_param
,
2275 .send_pdu
= iscsi_conn_send_pdu
,
2276 .get_stats
= iscsi_conn_get_stats
,
2277 .init_cmd_task
= iscsi_tcp_cmd_init
,
2278 .init_mgmt_task
= iscsi_tcp_mgmt_init
,
2279 .xmit_cmd_task
= iscsi_tcp_ctask_xmit
,
2280 .xmit_mgmt_task
= iscsi_tcp_mtask_xmit
,
2281 .cleanup_cmd_task
= iscsi_tcp_cleanup_ctask
,
2283 .session_recovery_timedout
= iscsi_session_recovery_timedout
,
2287 iscsi_tcp_init(void)
2289 if (iscsi_max_lun
< 1) {
2290 printk(KERN_ERR
"iscsi_tcp: Invalid max_lun value of %u\n",
2294 iscsi_tcp_transport
.max_lun
= iscsi_max_lun
;
2296 if (!iscsi_register_transport(&iscsi_tcp_transport
))
2303 iscsi_tcp_exit(void)
2305 iscsi_unregister_transport(&iscsi_tcp_transport
);
2308 module_init(iscsi_tcp_init
);
2309 module_exit(iscsi_tcp_exit
);