/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}
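/*
 * Allocate and attach the per-HBA rd_host context when the ramdisk
 * backend is bound to a target core HBA.
 */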
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_VERSION);

	return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}
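/*
 * Walk an array of rd_dev_sg_table entries, freeing every backing page
 * and scatterlist array, and return how many pages were released.
 */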
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
/*	rd_build_device_space():
 *
 *
 */
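/*
 * Populate one or more scatterlist tables with freshly allocated
 * zero-order pages, each filled with init_payload.  When scatterlist
 * chaining is available, consecutive tables are linked through a
 * reserved extra entry so they can be consumed as one long list.
 */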
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}
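/*
 * Size and allocate the sg_table array for the device's data pages;
 * total_sg_needed equals rd_page_count since each entry maps one page.
 */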
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}
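/*
 * Allocate the scatterlist tables that back T10-PI protection
 * information, sized from the data page count and the ratio of
 * prot_length to block_size.
 */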
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length is the per-block DIF tuple size (8 bytes).
	 * Protection pages needed = (rd_page_count * PAGE_SIZE / block_size)
	 * blocks * prot_length / PAGE_SIZE + pad; the PAGE_SIZE factors
	 * cancel, leaving rd_page_count * prot_length / block_size + 1.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}
static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		 " %u pages in %u tables, %lu total bytes\n",
		 rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count,
		 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}
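/*
 * The se_device may still be visible to RCU readers, so free the
 * containing rd_dev only after a grace period via call_rcu().
 */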
static void rd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct rd_dev *rd_dev = RD_DEV(dev);

	kfree(rd_dev);
}
static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}
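/*
 * Map a page index to the rd_dev_sg_table covering it.  Every table
 * holds a fixed number of entries, so the owning table is simply
 * page / sg_per_table; the table's start/end offsets are then
 * sanity-checked before returning.
 */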
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}
static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}
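/*
 * Verify and copy T10-PI protection information for a command against
 * the ramdisk's protection scatterlists: on writes the incoming PI is
 * verified and then stored into the prot pages; on reads the stored PI
 * is verified and copied back to the command's protection buffer.
 */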
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Allocate temporarily contiguous scatterlist entries if the prot
	 * pages straddle multiple scatterlist tables.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
							       prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	if (is_read)
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    prot_sg, prot_offset);
	else
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    cmd->t_prot_sg, 0);

	if (!rc)
		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

	if (need_to_release)
		kfree(prot_sg);

	return rc;
}
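/*
 * Service READ/WRITE by memcpy()ing between the command's scatterlist
 * and the ramdisk's backing pages, walking both lists a page at a time
 * with a struct sg_mapping_iter.
 */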
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, false);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);

	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, true);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
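/*
 * configfs device parameters: rd_pages sets the backing page count
 * (mandatory), rd_nullio=1 discards all I/O without touching memory.
 * As an illustration (exact configfs paths depend on your setup), a
 * device is typically sized with something like:
 *
 *   echo "rd_pages=65536" > /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
 */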
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));

	return bl;
}
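/*
 * Report capacity as the highest addressable LBA: total bytes
 * (rd_page_count * PAGE_SIZE) divided by the block size, minus one.
 */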
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}
static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}
static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}
static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
static const struct target_backend_ops rd_mcp_ops = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};
int __init rd_module_init(void)
{
	return transport_backend_register(&rd_mcp_ops);
}
void rd_module_exit(void)
{
	target_backend_unregister(&rd_mcp_ops);
}