/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"
static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &gn->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &gn->area_list);

	spin_unlock(&dev->lock);

	return 0;
}
static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &gn->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
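
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * allocator above walks the sorted area_list and hands out the first
 * gap large enough, with "len" expressed in 512-byte sectors. A target
 * reserving an LBA window, e.g. for on-disk metadata, would pair the
 * two calls:
 *
 *	sector_t lba;
 *
 *	if (gennvm_get_area(dev, &lba, len))
 *		return -ENOSPC;
 *	...
 *	gennvm_put_area(dev, lba);
 */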
static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}
static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */

		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_open_blocks = 0;
		lun->vlun.nr_closed_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}
	return 0;
}
static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
					u8 *blks, int nr_blks)
{
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_free_blocks--;
	}

	return 0;
}
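
/*
 * Note (editorial): nvm_bb_tbl_fold() collapses the per-plane table of
 * blks_per_lun * plane_mode entries into one entry per block, which is
 * why the loop above can index blocks[] directly. Any non-zero folded
 * entry (bad, grown bad, or reserved) is treated as unusable here.
 */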
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->state = NVM_BLK_ST_OPEN;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_open_blocks++;
		}
	}

	return 0;
}
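
/*
 * Worked example (hypothetical geometry, for illustration only): with
 * dev->sec_per_lun = 4096 and dev->sec_per_blk = 256, an L2P entry of
 * pba = 9000 resolves to lun_id = 9000 / 4096 = 2, an in-lun offset of
 * 9000 - 2 * 4096 = 808 sectors, and block index 808 / 256 = 3.
 */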
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret, nr_blks;
	u8 *blks;

	nr_blks = dev->blks_per_lun * dev->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks) {
			kfree(blks);
			return -ENOMEM;
		}

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
				lun->vlun.nr_free_blocks--;
				continue;
			}

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.lun_id;

			ret = nvm_get_bb_tbl(dev, ppa, blks);
			if (ret)
				pr_err("gennvm: could not get BB table\n");

			ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
			if (ret)
				pr_err("gennvm: BB table map failed\n");
		}
	}

	if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table.\n");
			pr_warn("gennvm: default block initialization\n");
		}
	}

	kfree(blks);
	return 0;
}
static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}
static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn) {
		ret = -ENOMEM;
		goto err;
	}

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	INIT_LIST_HEAD(&gn->area_list);
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gennvm_free(dev);
	module_put(THIS_MODULE);
	return ret;
}
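
/*
 * Note (editorial, based on the nvmm registration contract as
 * understood here): register_mgr returns 1 when the manager claims the
 * device, 0 to decline it, and a negative errno on error, which is why
 * the success path above returns 1 rather than 0.
 */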
static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
	module_put(THIS_MODULE);
}
static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	assert_spin_locked(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u has no free blocks available\n",
							lun->vlun.id);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
		goto out;

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->state = NVM_BLK_ST_OPEN;

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_open_blocks++;

out:
	return blk;
}
static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct nvm_block *blk;

	spin_lock(&vlun->lock);
	blk = gennvm_get_blk_unlocked(dev, vlun, flags);
	spin_unlock(&vlun->lock);

	return blk;
}
static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	assert_spin_locked(&vlun->lock);

	if (blk->state & NVM_BLK_ST_OPEN) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_open_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_CLOSED) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_closed_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->state);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	}
}
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;

	spin_lock(&vlun->lock);
	gennvm_put_blk_unlocked(dev, blk);
	spin_unlock(&vlun->lock);
}
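
/*
 * Usage sketch (illustrative, not from the original file): a target FTL
 * typically allocates a block, fills it, and returns it after erase or
 * invalidation. The *_unlocked variants let a caller that already holds
 * vlun->lock skip re-taking it:
 *
 *	blk = gennvm_get_blk(dev, vlun, 0);
 *	if (!blk)
 *		return -ENOSPC;
 *	...
 *	gennvm_put_blk(dev, blk);
 */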
static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
			ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);

	if (unlikely(ppa.g.ch > dev->nr_chnls ||
			ppa.g.lun > dev->luns_per_chnl ||
			ppa.g.blk > dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u)\n",
				ppa.g.ch, dev->nr_chnls,
				ppa.g.lun, dev->luns_per_chnl,
				ppa.g.blk, dev->blks_per_lun);
		return;
	}

	/* index luns channel-major, as in gennvm_block_bb() */
	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
	blk = &lun->vlun.blocks[ppa.g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->state = type;
}
/*
 * mark block bad in gennvm. It is expected that the target recovers separately
 */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int bit = -1;
	int max_secs = dev->ops->max_phys_sect;
	void *comp_bits = &rqd->ppa_status;

	nvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_ppas == 1) {
		gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
		return;
	}

	while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
		gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
}
static void gennvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_instance *ins = rqd->ins;

	if (rqd->error == NVM_RSP_ERR_FAILWRITE)
		gennvm_mark_blk_bad(rqd->dev, rqd);

	ins->tt->end_io(rqd);
}
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->end_io = gennvm_end_io;
	return dev->ops->submit_io(dev, rqd);
}
static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	struct ppa_addr addr = block_to_ppa(dev, blk);

	return nvm_erase_ppa(dev, &addr, 1);
}
static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
{
	return test_and_set_bit(lunid, dev->lun_map);
}
static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
{
	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}
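
/*
 * Usage sketch (illustrative): a target wanting exclusive ownership of
 * a LUN treats a non-zero return from reserve as "already taken", and
 * releases the bit again on teardown:
 *
 *	if (gennvm_reserve_lun(dev, lunid))
 *		return -EBUSY;
 *	...
 *	gennvm_release_lun(dev, lunid);
 */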
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	if (unlikely(lunid >= dev->nr_luns))
		return NULL;

	return &gn->luns[lunid].vlun;
}
static void gennvm_lun_info_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_open_blocks,
				lun->vlun.nr_closed_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}
static struct nvmm_type gennvm = {
	.name			= "gennvm",
	.version		= {0, 1, 0},

	.register_mgr		= gennvm_register,
	.unregister_mgr		= gennvm_unregister,

	.get_blk_unlocked	= gennvm_get_blk_unlocked,
	.put_blk_unlocked	= gennvm_put_blk_unlocked,

	.get_blk		= gennvm_get_blk,
	.put_blk		= gennvm_put_blk,

	.submit_io		= gennvm_submit_io,
	.erase_blk		= gennvm_erase_blk,

	.mark_blk		= gennvm_mark_blk,

	.get_lun		= gennvm_get_lun,
	.reserve_lun		= gennvm_reserve_lun,
	.release_lun		= gennvm_release_lun,
	.lun_info_print		= gennvm_lun_info_print,

	.get_area		= gennvm_get_area,
	.put_area		= gennvm_put_area,
};
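
/*
 * Note (editorial assumption): targets are not expected to call the
 * functions in this file directly; the lightnvm core dispatches through
 * this ops table (e.g. dev->mt->get_blk(dev, vlun, flags)), keeping
 * media managers interchangeable.
 */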
static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}
static void gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}
module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");