/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_NVM
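/*
 * The LightNVM admin opcodes below sit in the NVMe vendor-specific admin
 * command range (0xC0-0xFF); whether a namespace actually speaks LightNVM
 * is detected separately, in nvme_nvm_ns_supported() near the bottom of
 * this file.
 */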
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
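/*
 * Each of the command structures below overlays the generic 64-byte NVMe
 * submission queue entry (struct nvme_common_command);
 * _nvme_nvm_check_size() further down enforces the sizes at build time.
 */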
struct nvme_nvm_hb_rw {
	/* ... */
};

struct nvme_nvm_ph_rw {
	/* ... */
};

struct nvme_nvm_identity {
	/* ... */
};

struct nvme_nvm_l2ptbl {
	/* ... */
};

struct nvme_nvm_getbbtbl {
	/* ... */
};

struct nvme_nvm_setbbtbl {
	/* ... */
};

struct nvme_nvm_erase_blk {
	/* ... */
};
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};
struct nvme_nvm_id_group {
	/* ... */
};

struct nvme_nvm_addr_format {
	/* ... */
};

struct nvme_nvm_id {
	/* ... */
	struct nvme_nvm_addr_format	ppaf;
	/* ... */
	struct nvme_nvm_id_group	groups[4];
};

struct nvme_nvm_bb_tbl {
	/* ... */
};
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
}
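/*
 * BUILD_BUG_ON() turns a true condition into a compile-time error, so any
 * accidental padding or field growth in the wire structures above breaks
 * the build instead of silently corrupting commands on the wire.
 */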
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;
	int i, end;

	end = min_t(u32, 4, nvm_id->cgrps);

	for (i = 0; i < end; i++) {
		src = &nvme_nvm_id->groups[i];
		dst = &nvm_id->groups[i];

		dst->mtype = src->mtype;
		dst->fmtype = src->fmtype;
		dst->num_ch = src->num_ch;
		dst->num_lun = src->num_lun;
		dst->num_pln = src->num_pln;

		dst->num_pg = le16_to_cpu(src->num_pg);
		dst->num_blk = le16_to_cpu(src->num_blk);
		dst->fpg_sz = le16_to_cpu(src->fpg_sz);
		dst->csecs = le16_to_cpu(src->csecs);
		dst->sos = le16_to_cpu(src->sos);

		dst->trdt = le32_to_cpu(src->trdt);
		dst->trdm = le32_to_cpu(src->trdm);
		dst->tprt = le32_to_cpu(src->tprt);
		dst->tprm = le32_to_cpu(src->tprm);
		dst->tbet = le32_to_cpu(src->tbet);
		dst->tbem = le32_to_cpu(src->tbem);
		dst->mpos = le32_to_cpu(src->mpos);
		dst->mccap = le32_to_cpu(src->mccap);

		dst->cpar = le16_to_cpu(src->cpar);
	}

	return 0;
}
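/*
 * Identify data is little-endian per the NVMe convention, so init_grps()
 * converts each multi-byte per-group field with le16_to_cpu()/
 * le32_to_cpu(); the single-byte fields (mtype, fmtype and the
 * channel/LUN/plane counts) are copied as-is.
 */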
static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cgrps = nvme_nvm_id->cgrps;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvme_nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}
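/*
 * This is the ->identity() hook of nvme_nvm_dev_ops below: the lightnvm
 * core calls it once at registration time, and the converted nvm_id
 * becomes the device geometry that targets operate against.
 */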
static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
									ret);
			ret = -EIO;
			goto out;
		}

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}
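/*
 * The L2P table is pulled in chunks of at most nlb_pr_rq entries, where
 * nlb_pr_rq = len / sizeof(u64) and len is the admin queue's maximum
 * transfer size in bytes. With 256 KiB transfers, for example, that is
 * 32768 8-byte entries per command. update_l2p() runs once per chunk and
 * can abort the walk by returning non-zero, which is mapped to -EINTR.
 */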
static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
				void *priv)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->dev, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		dev_err(ctrl->dev, "bbt version not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
		dev_err(ctrl->dev, "bbt unexpected blocks returned (%u!=%u)",
					le32_to_cpu(bb_tbl->tblks), nr_blocks);
		ret = -EINVAL;
		goto out;
	}

	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);

out:
	kfree(bb_tbl);
	return ret;
}
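/*
 * The device returns a 512-byte header (validated above for the "BBLT"
 * magic, version 1, and a block count matching the expected geometry)
 * followed by one state byte per block, which is why tblsz is
 * sizeof(struct nvme_nvm_bb_tbl) + nr_blocks.
 */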
static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
								int type)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n",
									ret);
	return ret;
}
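/*
 * As elsewhere in NVMe, nlb is 0's based: cpu_to_le16(rqd->nr_pages - 1)
 * marks rqd->nr_pages blocks with the given bad-block state in a single
 * command.
 */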
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
					rqd->bio->bi_iter.bi_sector));
}
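/*
 * Hybrid commands (NVM_OP_HBREAD/NVM_OP_HBWRITE) carry both addresses:
 * the physical target in spba and, via the hb_rw overlay, the logical
 * slba recovered from the bio's starting sector with nvme_block_nr().
 */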
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	if (dev->mt->end_io(rqd, error))
		pr_err("nvme: err status: %x result: %lx\n",
				rq->errors, (unsigned long)rq->special);

	kfree(rq->cmd);
	blk_mq_free_request(rq);
}
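/*
 * Completion is routed to the media manager first (dev->mt->end_io); a
 * non-zero return is logged with the raw NVMe status and result. The
 * request and its private command buffer are freed here because the
 * block layer never sees them again.
 */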
static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = (void *)0;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
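/*
 * A minimal sketch of a submission from the caller's side, assuming an
 * already-built bio and PPA (the variable names are hypothetical; in
 * practice targets go through the media manager and the ops table rather
 * than calling this static function directly):
 *
 *	rqd->opcode = NVM_OP_PWRITE;
 *	rqd->nr_pages = 1;
 *	rqd->ppa_addr = ppa;
 *	rqd->bio = bio;
 *	if (nvme_nvm_submit_io(q, rqd))
 *		...;	(handle submission failure)
 */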
static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
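/*
 * Erase is issued synchronously on the namespace's I/O queue with no data
 * phase (NULL payload, zero length); as in the read/write path, length is
 * the 0's-based count of PPAs being erased.
 */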
static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name)
{
	struct nvme_ns *ns = q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
				dma_addr_t dma_handler)
{
	dma_pool_free(pool, ppa_list, dma_handler);
}
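/*
 * The pools hand out PAGE_SIZE buffers with PAGE_SIZE alignment; the
 * lightnvm core uses them for per-command PPA lists (note the ppa_list
 * argument above) that must be DMA-able, hence dma_pool rather than
 * kmalloc.
 */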
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,
};
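/*
 * This table is what nvme_nvm_register() hands to the lightnvm core;
 * everything the core does against the device funnels back through these
 * hooks.
 */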
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
	nvm_unregister(disk_name);
}
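/*
 * There is no standardised LightNVM feature bit yet, so support is
 * sniffed from known PCI vendor/device IDs combined with a
 * vendor-specific byte in the Identify Namespace data.
 */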
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	/* XXX: this is poking into PCI structures from generic code! */
	struct pci_dev *pdev = to_pci_dev(ctrl->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}
#else /* !CONFIG_NVM */
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}

int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */