/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
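
/*
 * Example usage of the iterator above (an illustrative sketch, not code taken
 * from the driver itself; rrpc_get_lun_rr() below does essentially this when
 * balancing free blocks across LUNs):
 *
 *	struct rrpc_lun *rlun;
 *	int i;
 *
 *	rrpc_for_each_lun(rrpc, rlun, i)
 *		pr_debug("lun %d: %u free blocks\n", i,
 *					rlun->parent->nr_free_blocks);
 */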

static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
							unsigned len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
	}
	spin_unlock(&rrpc->rev_lock);
}

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;
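
	/*
	 * Both values are in units of exposed logical pages: bi_sector counts
	 * 512-byte sectors and bi_size counts bytes, so dividing by
	 * NR_PHY_IN_LOG (sectors per exposed page) and RRPC_EXPOSED_PAGE_SIZE
	 * yields the starting logical page and the page count (assuming a 4KB
	 * exposed page, NR_PHY_IN_LOG is 8).
	 */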

	rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

	return lun_blk * rrpc->dev->sec_per_blk;
}

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->sec_per_blk;
}

static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}
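
/*
 * Worked example of the decomposition above (hypothetical geometry, for
 * illustration only): with sec_per_pg = 4, pgs_per_blk = 256,
 * blks_per_lun = 1024 and luns_per_chnl = 4, a linear address ppa = 5000
 * splits least-significant component first:
 *
 *	sec = 5000 % 4 = 0,	ppa = 5000 / 4 = 1250
 *	pg  = 1250 % 256 = 226,	ppa = 1250 / 256 = 4
 *	blk = 4 % 1024 = 4,	ppa = 4 / 1024 = 0
 *	lun = 0 % 4 = 0,	ch  = 0 / 4 = 0
 *
 * i.e. sector within page, page within block, block within LUN, LUN within
 * channel, and whatever remains is the channel index.
 */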

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	if (rlun->cur) {
		spin_lock(&rlun->cur->lock);
		WARN_ON(!block_is_full(rrpc, rlun->cur));
		spin_unlock(&rlun->cur->lock);
	}
	rlun->cur = rblk;
}

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = rrpc_get_rblk(rlun, blk->id);
	spin_unlock(&lun->lock);

	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	spin_unlock(&lun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/**
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * GC algorithms may call this function to migrate remaining live
 * pages off the block prior to erasing it. This function blocks
 * further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {

		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			continue;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_dev *dev = rrpc->dev;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with highest number of invalid pages, will be in the beginning
 * of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Simple round-robin Logical to physical address translation.
 *
 * Retrieve the mapping using the active append point. Then update the ap for
 * the next write to the disk.
 *
 * Returns rrpc_addr with the physical address and block. Remember to return to
 * rrpc->addr_cache when request is finished.
 */
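
/*
 * Allocation flow, summarising the code below: pick a LUN round-robin (or,
 * for GC writes, the LUN with the most free blocks), try to allocate the
 * next page in that LUN's current block, fetch a fresh block from the media
 * manager when the current one fills up, and for GC fall back to the LUN's
 * reserved gc_cur block when the regular path cannot make progress.
 */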

static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_rw(bio) == WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;

		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		addr[i].addr = pba;
		raddr[mod].addr = slba + i;
	}

	return 0;
}
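
/*
 * Note on the two tables touched above: trans_map holds the forward
 * (logical to physical) entries, while rev_trans_map is indexed by the
 * physical address (modulo the target's sector count) and records which
 * logical address currently lives there, so GC can find the owner of a
 * physical page before moving it (see rrpc_move_valid_pages()).
 */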

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun;
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		lun = rlun->parent;
		if (!lun)
			break;
		dev->mt->release_lun(dev, lun->id);
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_luns; i++) {
		int lunid = lun_begin + i;
		struct nvm_lun *lun;

		if (dev->mt->reserve_lun(dev, lunid)) {
			pr_err("rrpc: lun %u is already allocated\n", lunid);
			goto err;
		}

		lun = dev->mt->get_lun(dev, lunid);
		if (!lun)
			goto err;

		rlun = &rrpc->luns[i];
		rlun->parent = lun;
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}

		rlun->rrpc = rrpc;
		INIT_LIST_HEAD(&rlun->prio_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);
	}

	return 0;
err:
	return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t size = rrpc->nr_sects * dev->sec_size;
	int ret;

	size >>= 9;

	ret = mt->get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->sec_size) - 9);

	return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);

	mt->put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Looks up the logical address in the reverse trans map and checks whether it
 * is valid by comparing the logical-to-physical address with the physical
 * address.
 * Returns 0 on free, otherwise 1 if in use
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;

		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;

		rlun->gc_cur = rblk;
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}
;
1348 static void *rrpc_init(struct nvm_dev
*dev
, struct gendisk
*tdisk
,
1349 int lun_begin
, int lun_end
)
1351 struct request_queue
*bqueue
= dev
->q
;
1352 struct request_queue
*tqueue
= tdisk
->queue
;
1357 if (!(dev
->identity
.dom
& NVM_RSP_L2P
)) {
1358 pr_err("nvm: rrpc: device does not support l2p (%x)\n",
1360 return ERR_PTR(-EINVAL
);
1363 rrpc
= kzalloc(sizeof(struct rrpc
), GFP_KERNEL
);
1365 return ERR_PTR(-ENOMEM
);
1367 rrpc
->instance
.tt
= &tt_rrpc
;
1371 bio_list_init(&rrpc
->requeue_bios
);
1372 spin_lock_init(&rrpc
->bio_lock
);
1373 INIT_WORK(&rrpc
->ws_requeue
, rrpc_requeue
);
1375 rrpc
->nr_luns
= lun_end
- lun_begin
+ 1;
1376 rrpc
->total_blocks
= (unsigned long)dev
->blks_per_lun
* rrpc
->nr_luns
;
1377 rrpc
->nr_sects
= (unsigned long long)dev
->sec_per_lun
* rrpc
->nr_luns
;
1379 /* simple round-robin strategy */
1380 atomic_set(&rrpc
->next_lun
, -1);
1382 ret
= rrpc_area_init(rrpc
, &soffset
);
1384 pr_err("nvm: rrpc: could not initialize area\n");
1385 return ERR_PTR(ret
);
1387 rrpc
->soffset
= soffset
;
1389 ret
= rrpc_luns_init(rrpc
, lun_begin
, lun_end
);
1391 pr_err("nvm: rrpc: could not initialize luns\n");
1395 rrpc
->poffset
= dev
->sec_per_lun
* lun_begin
;
1396 rrpc
->lun_offset
= lun_begin
;
1398 ret
= rrpc_core_init(rrpc
);
1400 pr_err("nvm: rrpc: could not initialize core\n");
1404 ret
= rrpc_map_init(rrpc
);
1406 pr_err("nvm: rrpc: could not initialize maps\n");
1410 ret
= rrpc_blocks_init(rrpc
);
1412 pr_err("nvm: rrpc: could not initialize state for blocks\n");
1416 ret
= rrpc_luns_configure(rrpc
);
1418 pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
1422 ret
= rrpc_gc_init(rrpc
);
1424 pr_err("nvm: rrpc: could not initialize gc\n");
1428 /* inherit the size from the underlying device */
1429 blk_queue_logical_block_size(tqueue
, queue_physical_block_size(bqueue
));
1430 blk_queue_max_hw_sectors(tqueue
, queue_max_hw_sectors(bqueue
));
1432 pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
1433 rrpc
->nr_luns
, (unsigned long long)rrpc
->nr_sects
);
1435 mod_timer(&rrpc
->gc_timer
, jiffies
+ msecs_to_jiffies(10));
1440 return ERR_PTR(ret
);

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");