drivers/lightnvm/rrpc.c
1 /*
2 * Copyright (C) 2015 IT University of Copenhagen
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
15 */
16
17 #include "rrpc.h"
18
19 static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
20 static DECLARE_RWSEM(rrpc_lock);
21
22 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
23 struct nvm_rq *rqd, unsigned long flags);
24
25 #define rrpc_for_each_lun(rrpc, rlun, i) \
26 for ((i) = 0, rlun = &(rrpc)->luns[0]; \
27 (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
28
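/* Mark the physical page backing @a invalid in its block and clear the
 * reverse-map entry. Caller must hold rrpc->rev_lock.
 */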
29 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
30 {
31 struct rrpc_block *rblk = a->rblk;
32 unsigned int pg_offset;
33
34 lockdep_assert_held(&rrpc->rev_lock);
35
36 if (a->addr == ADDR_EMPTY || !rblk)
37 return;
38
39 spin_lock(&rblk->lock);
40
41 div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
42 WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
43 rblk->nr_invalid_pages++;
44
45 spin_unlock(&rblk->lock);
46
47 rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
48 }
49
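/* Invalidate the mappings for logical addresses [slba, slba + len); used to
 * implement discard.
 */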
50 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
51 unsigned len)
52 {
53 sector_t i;
54
55 spin_lock(&rrpc->rev_lock);
56 for (i = slba; i < slba + len; i++) {
57 struct rrpc_addr *gp = &rrpc->trans_map[i];
58
59 rrpc_page_invalidate(rrpc, gp);
60 gp->rblk = NULL;
61 }
62 spin_unlock(&rrpc->rev_lock);
63 }
64
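/* Allocate a request and take the inflight lock on @pages logical addresses
 * starting at @laddr. Returns ERR_PTR(-ENOMEM) on allocation failure and
 * NULL if the range is already inflight.
 */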
65 static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
66 sector_t laddr, unsigned int pages)
67 {
68 struct nvm_rq *rqd;
69 struct rrpc_inflight_rq *inf;
70
71 rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
72 if (!rqd)
73 return ERR_PTR(-ENOMEM);
74
75 inf = rrpc_get_inflight_rq(rqd);
76 if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
77 mempool_free(rqd, rrpc->rq_pool);
78 return NULL;
79 }
80
81 return rqd;
82 }
83
84 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
85 {
86 struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
87
88 rrpc_unlock_laddr(rrpc, inf);
89
90 mempool_free(rqd, rrpc->rq_pool);
91 }
92
93 static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
94 {
95 sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
96 sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
97 struct nvm_rq *rqd;
98
99 do {
100 rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
101 schedule();
102 } while (!rqd);
103
104 if (IS_ERR(rqd)) {
105 pr_err("rrpc: unable to acquire inflight IO\n");
106 bio_io_error(bio);
107 return;
108 }
109
110 rrpc_invalidate_range(rrpc, slba, len);
111 rrpc_inflight_laddr_release(rrpc, rqd);
112 }
113
114 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
115 {
116 return (rblk->next_page == rrpc->dev->pgs_per_blk);
117 }
118
119 static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
120 {
121 struct nvm_block *blk = rblk->parent;
122
123 return blk->id * rrpc->dev->pgs_per_blk;
124 }
125
126 static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
127 struct ppa_addr r)
128 {
129 struct ppa_addr l;
130 int secs, pgs, blks, luns;
131 sector_t ppa = r.ppa;
132
133 l.ppa = 0;
134
135 div_u64_rem(ppa, dev->sec_per_pg, &secs);
136 l.g.sec = secs;
137
138 sector_div(ppa, dev->sec_per_pg);
139 div_u64_rem(ppa, dev->sec_per_blk, &pgs);
140 l.g.pg = pgs;
141
142 sector_div(ppa, dev->pgs_per_blk);
143 div_u64_rem(ppa, dev->blks_per_lun, &blks);
144 l.g.blk = blks;
145
146 sector_div(ppa, dev->blks_per_lun);
147 div_u64_rem(ppa, dev->luns_per_chnl, &luns);
148 l.g.lun = luns;
149
150 sector_div(ppa, dev->luns_per_chnl);
151 l.g.ch = ppa;
152
153 return l;
154 }
155
156 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
157 {
158 struct ppa_addr paddr;
159
160 paddr.ppa = addr;
161 return linear_to_generic_addr(dev, paddr);
162 }
163
164 /* requires lun->lock taken */
165 static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
166 {
167 struct rrpc *rrpc = rlun->rrpc;
168
169 BUG_ON(!rblk);
170
171 if (rlun->cur) {
172 spin_lock(&rlun->cur->lock);
173 WARN_ON(!block_is_full(rrpc, rlun->cur));
174 spin_unlock(&rlun->cur->lock);
175 }
176 rlun->cur = rblk;
177 }
178
179 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
180 unsigned long flags)
181 {
182 struct nvm_block *blk;
183 struct rrpc_block *rblk;
184
185 blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
186 if (!blk)
187 return NULL;
188
189 rblk = &rlun->blocks[blk->id];
190 blk->priv = rblk;
191
192 bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
193 rblk->next_page = 0;
194 rblk->nr_invalid_pages = 0;
195 atomic_set(&rblk->data_cmnt_size, 0);
196
197 return rblk;
198 }
199
200 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
201 {
202 nvm_put_blk(rrpc->dev, rblk->parent);
203 }
204
205 static void rrpc_put_blks(struct rrpc *rrpc)
206 {
207 struct rrpc_lun *rlun;
208 int i;
209
210 for (i = 0; i < rrpc->nr_luns; i++) {
211 rlun = &rrpc->luns[i];
212 if (rlun->cur)
213 rrpc_put_blk(rrpc, rlun->cur);
214 if (rlun->gc_cur)
215 rrpc_put_blk(rrpc, rlun->gc_cur);
216 }
217 }
218
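/* Pick the next LUN in round-robin order for a regular write. */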
219 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
220 {
221 int next = atomic_inc_return(&rrpc->next_lun);
222
223 return &rrpc->luns[next % rrpc->nr_luns];
224 }
225
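/* Queue the per-LUN GC work item for every LUN. */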
226 static void rrpc_gc_kick(struct rrpc *rrpc)
227 {
228 struct rrpc_lun *rlun;
229 unsigned int i;
230
231 for (i = 0; i < rrpc->nr_luns; i++) {
232 rlun = &rrpc->luns[i];
233 queue_work(rrpc->krqd_wq, &rlun->ws_gc);
234 }
235 }
236
237 /*
238  * Timed GC: kick garbage collection for all LUNs at a fixed interval.
239  */
240 static void rrpc_gc_timer(unsigned long data)
241 {
242 struct rrpc *rrpc = (struct rrpc *)data;
243
244 rrpc_gc_kick(rrpc);
245 mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
246 }
247
248 static void rrpc_end_sync_bio(struct bio *bio)
249 {
250 struct completion *waiting = bio->bi_private;
251
252 if (bio->bi_error)
253 pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
254
255 complete(waiting);
256 }
257
258 /*
259 * rrpc_move_valid_pages -- migrate live data off the block
260 * @rrpc: the 'rrpc' structure
261  * @rblk: the block from which to migrate live pages
262 *
263 * Description:
264 * GC algorithms may call this function to migrate remaining live
265 * pages off the block prior to erasing it. This function blocks
266 * further execution until the operation is complete.
267 */
268 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
269 {
270 struct request_queue *q = rrpc->dev->q;
271 struct rrpc_rev_addr *rev;
272 struct nvm_rq *rqd;
273 struct bio *bio;
274 struct page *page;
275 int slot;
276 int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
277 u64 phys_addr;
278 DECLARE_COMPLETION_ONSTACK(wait);
279
280 if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
281 return 0;
282
283 bio = bio_alloc(GFP_NOIO, 1);
284 if (!bio) {
285 pr_err("nvm: could not alloc bio to gc\n");
286 return -ENOMEM;
287 }
288
289 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
290
291 while ((slot = find_first_zero_bit(rblk->invalid_pages,
292 nr_pgs_per_blk)) < nr_pgs_per_blk) {
293
294 /* Lock laddr */
295 phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
296
297 try:
298 spin_lock(&rrpc->rev_lock);
299 /* Get logical address from physical to logical table */
300 rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
301 /* already updated by previous regular write */
302 if (rev->addr == ADDR_EMPTY) {
303 spin_unlock(&rrpc->rev_lock);
304 continue;
305 }
306
307 rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
308 if (IS_ERR_OR_NULL(rqd)) {
309 spin_unlock(&rrpc->rev_lock);
310 schedule();
311 goto try;
312 }
313
314 spin_unlock(&rrpc->rev_lock);
315
316 /* Perform read to do GC */
317 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
318 bio->bi_rw = READ;
319 bio->bi_private = &wait;
320 bio->bi_end_io = rrpc_end_sync_bio;
321
322 /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
323 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
324
325 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
326 pr_err("rrpc: gc read failed.\n");
327 rrpc_inflight_laddr_release(rrpc, rqd);
328 goto finished;
329 }
330 wait_for_completion_io(&wait);
331
332 bio_reset(bio);
333 reinit_completion(&wait);
334
335 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
336 bio->bi_rw = WRITE;
337 bio->bi_private = &wait;
338 bio->bi_end_io = rrpc_end_sync_bio;
339
340 bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
341
342 /* turn the command around and write the data back to a new
343 * address
344 */
345 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
346 pr_err("rrpc: gc write failed.\n");
347 rrpc_inflight_laddr_release(rrpc, rqd);
348 goto finished;
349 }
350 wait_for_completion_io(&wait);
351
352 rrpc_inflight_laddr_release(rrpc, rqd);
353
354 bio_reset(bio);
355 }
356
357 finished:
358 mempool_free(page, rrpc->page_pool);
359 bio_put(bio);
360
361 if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
362 pr_err("nvm: failed to garbage collect block\n");
363 return -EIO;
364 }
365
366 return 0;
367 }
368
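/* Block reclaim work: migrate any remaining valid pages, then erase the
 * block and hand it back to the media manager.
 */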
369 static void rrpc_block_gc(struct work_struct *work)
370 {
371 struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
372 ws_gc);
373 struct rrpc *rrpc = gcb->rrpc;
374 struct rrpc_block *rblk = gcb->rblk;
375 struct nvm_dev *dev = rrpc->dev;
376
377 pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
378
379 if (rrpc_move_valid_pages(rrpc, rblk))
380 goto done;
381
382 nvm_erase_blk(dev, rblk->parent);
383 rrpc_put_blk(rrpc, rblk);
384 done:
385 mempool_free(gcb, rrpc->gcb_pool);
386 }
387
388 /* the block with the highest number of invalid pages will be at the
389  * beginning of the list
390  */
391 static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
392 struct rrpc_block *rb)
393 {
394 if (ra->nr_invalid_pages == rb->nr_invalid_pages)
395 return ra;
396
397 return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
398 }
399
400 /* linearly find the block with the highest number of invalid pages;
401  * requires lun->lock
402  */
403 static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
404 {
405 struct list_head *prio_list = &rlun->prio_list;
406 struct rrpc_block *rblock, *max;
407
408 BUG_ON(list_empty(prio_list));
409
410 max = list_first_entry(prio_list, struct rrpc_block, prio);
411 list_for_each_entry(rblock, prio_list, prio)
412 max = rblock_max_invalid(max, rblock);
413
414 return max;
415 }
416
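/* Per-LUN GC work: while the LUN is short on free blocks, pick the block
 * with the most invalid pages from the priority list and queue it for
 * reclaim.
 */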
417 static void rrpc_lun_gc(struct work_struct *work)
418 {
419 struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
420 struct rrpc *rrpc = rlun->rrpc;
421 struct nvm_lun *lun = rlun->parent;
422 struct rrpc_block_gc *gcb;
423 unsigned int nr_blocks_need;
424
425 nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
426
427 if (nr_blocks_need < rrpc->nr_luns)
428 nr_blocks_need = rrpc->nr_luns;
429
430 spin_lock(&lun->lock);
431 while (nr_blocks_need > lun->nr_free_blocks &&
432 !list_empty(&rlun->prio_list)) {
433 struct rrpc_block *rblock = block_prio_find_max(rlun);
434 struct nvm_block *block = rblock->parent;
435
436 if (!rblock->nr_invalid_pages)
437 break;
438
439 list_del_init(&rblock->prio);
440
441 BUG_ON(!block_is_full(rrpc, rblock));
442
443 pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
444
445 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
446 if (!gcb)
447 break;
448
449 gcb->rrpc = rrpc;
450 gcb->rblk = rblock;
451 INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
452
453 queue_work(rrpc->kgc_wq, &gcb->ws_gc);
454
455 nr_blocks_need--;
456 }
457 spin_unlock(&lun->lock);
458
459 /* TODO: Hint that request queue can be started again */
460 }
461
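/* Deferred work that puts a fully written block on its LUN's GC priority
 * list, making it eligible for reclaim.
 */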
462 static void rrpc_gc_queue(struct work_struct *work)
463 {
464 struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
465 ws_gc);
466 struct rrpc *rrpc = gcb->rrpc;
467 struct rrpc_block *rblk = gcb->rblk;
468 struct nvm_lun *lun = rblk->parent->lun;
469 struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
470
471 spin_lock(&rlun->lock);
472 list_add_tail(&rblk->prio, &rlun->prio_list);
473 spin_unlock(&rlun->lock);
474
475 mempool_free(gcb, rrpc->gcb_pool);
476 pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
477 rblk->parent->id);
478 }
479
480 static const struct block_device_operations rrpc_fops = {
481 .owner = THIS_MODULE,
482 };
483
484 static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
485 {
486 unsigned int i;
487 struct rrpc_lun *rlun, *max_free;
488
489 if (!is_gc)
490 return get_next_lun(rrpc);
491
492 /* during GC, we don't care about RR, instead we want to keep the
493  * number of free blocks even across the LUNs.
494  */
495 max_free = &rrpc->luns[0];
496 /* prevent a GC-ing lun from devouring pages of a lun with
497  * few free blocks. We don't take the lock as we only need an
498  * estimate.
499  */
500 rrpc_for_each_lun(rrpc, rlun, i) {
501 if (rlun->parent->nr_free_blocks >
502 max_free->parent->nr_free_blocks)
503 max_free = rlun;
504 }
505
506 return max_free;
507 }
508
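/* Update the forward and reverse translation maps for @laddr, invalidating
 * any previous mapping of that logical address.
 */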
509 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
510 struct rrpc_block *rblk, u64 paddr)
511 {
512 struct rrpc_addr *gp;
513 struct rrpc_rev_addr *rev;
514
515 BUG_ON(laddr >= rrpc->nr_pages);
516
517 gp = &rrpc->trans_map[laddr];
518 spin_lock(&rrpc->rev_lock);
519 if (gp->rblk)
520 rrpc_page_invalidate(rrpc, gp);
521
522 gp->addr = paddr;
523 gp->rblk = rblk;
524
525 rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
526 rev->addr = laddr;
527 spin_unlock(&rrpc->rev_lock);
528
529 return gp;
530 }
531
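/* Hand out the next free page in @rblk, or ADDR_EMPTY if the block is full. */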
532 static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
533 {
534 u64 addr = ADDR_EMPTY;
535
536 spin_lock(&rblk->lock);
537 if (block_is_full(rrpc, rblk))
538 goto out;
539
540 addr = block_to_addr(rrpc, rblk) + rblk->next_page;
541
542 rblk->next_page++;
543 out:
544 spin_unlock(&rblk->lock);
545 return addr;
546 }
547
548 /* Simple round-robin Logical to physical address translation.
549 *
550 * Retrieve the mapping using the active append point. Then update the ap for
551 * the next write to the disk.
552 *
553 * Returns rrpc_addr with the physical address and block. Remember to return to
554 * rrpc->addr_cache when request is finished.
555 */
556 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
557 int is_gc)
558 {
559 struct rrpc_lun *rlun;
560 struct rrpc_block *rblk;
561 struct nvm_lun *lun;
562 u64 paddr;
563
564 rlun = rrpc_get_lun_rr(rrpc, is_gc);
565 lun = rlun->parent;
566
567 if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
568 return NULL;
569
570 spin_lock(&rlun->lock);
571
572 rblk = rlun->cur;
573 retry:
574 paddr = rrpc_alloc_addr(rrpc, rblk);
575
576 if (paddr == ADDR_EMPTY) {
577 rblk = rrpc_get_blk(rrpc, rlun, 0);
578 if (rblk) {
579 rrpc_set_lun_cur(rlun, rblk);
580 goto retry;
581 }
582
583 if (is_gc) {
584 /* retry from emergency gc block */
585 paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
586 if (paddr == ADDR_EMPTY) {
587 rblk = rrpc_get_blk(rrpc, rlun, 1);
588 if (!rblk) {
589 pr_err("rrpc: no more blocks\n");
590 goto err;
591 }
592
593 rlun->gc_cur = rblk;
594 paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
595 }
596 rblk = rlun->gc_cur;
597 }
598 }
599
600 spin_unlock(&rlun->lock);
601 return rrpc_update_map(rrpc, laddr, rblk, paddr);
602 err:
603 spin_unlock(&rlun->lock);
604 return NULL;
605 }
606
607 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
608 {
609 struct rrpc_block_gc *gcb;
610
611 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
612 if (!gcb) {
613 pr_err("rrpc: unable to queue block for gc.\n");
614 return;
615 }
616
617 gcb->rrpc = rrpc;
618 gcb->rblk = rblk;
619
620 INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
621 queue_work(rrpc->kgc_wq, &gcb->ws_gc);
622 }
623
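/* Write completion: account the committed pages per block and, once a block
 * is completely written, queue it for GC bookkeeping.
 */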
624 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
625 sector_t laddr, uint8_t npages)
626 {
627 struct rrpc_addr *p;
628 struct rrpc_block *rblk;
629 struct nvm_lun *lun;
630 int cmnt_size, i;
631
632 for (i = 0; i < npages; i++) {
633 p = &rrpc->trans_map[laddr + i];
634 rblk = p->rblk;
635 lun = rblk->parent->lun;
636
637 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
638 if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
639 rrpc_run_gc(rrpc, rblk);
640 }
641 }
642
643 static int rrpc_end_io(struct nvm_rq *rqd, int error)
644 {
645 struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
646 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
647 uint8_t npages = rqd->nr_pages;
648 sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
649
650 if (bio_data_dir(rqd->bio) == WRITE)
651 rrpc_end_io_write(rrpc, rrqd, laddr, npages);
652
653 if (rrqd->flags & NVM_IOTYPE_GC)
654 return 0;
655
656 rrpc_unlock_rq(rrpc, rqd);
657 bio_put(rqd->bio);
658
659 if (npages > 1)
660 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
661 if (rqd->metadata)
662 nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
663
664 mempool_free(rqd, rrpc->rq_pool);
665
666 return 0;
667 }
668
669 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
670 struct nvm_rq *rqd, unsigned long flags, int npages)
671 {
672 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
673 struct rrpc_addr *gp;
674 sector_t laddr = rrpc_get_laddr(bio);
675 int is_gc = flags & NVM_IOTYPE_GC;
676 int i;
677
678 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
679 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
680 return NVM_IO_REQUEUE;
681 }
682
683 for (i = 0; i < npages; i++) {
684 /* We assume that mapping occurs at 4KB granularity */
685 BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
686 gp = &rrpc->trans_map[laddr + i];
687
688 if (gp->rblk) {
689 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
690 gp->addr);
691 } else {
692 BUG_ON(is_gc);
693 rrpc_unlock_laddr(rrpc, r);
694 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
695 rqd->dma_ppa_list);
696 return NVM_IO_DONE;
697 }
698 }
699
700 rqd->opcode = NVM_OP_HBREAD;
701
702 return NVM_IO_OK;
703 }
704
705 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
706 unsigned long flags)
707 {
708 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
709 int is_gc = flags & NVM_IOTYPE_GC;
710 sector_t laddr = rrpc_get_laddr(bio);
711 struct rrpc_addr *gp;
712
713 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
714 return NVM_IO_REQUEUE;
715
716 BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
717 gp = &rrpc->trans_map[laddr];
718
719 if (gp->rblk) {
720 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
721 } else {
722 BUG_ON(is_gc);
723 rrpc_unlock_rq(rrpc, rqd);
724 return NVM_IO_DONE;
725 }
726
727 rqd->opcode = NVM_OP_HBREAD;
728 rrqd->addr = gp;
729
730 return NVM_IO_OK;
731 }
732
733 static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
734 struct nvm_rq *rqd, unsigned long flags, int npages)
735 {
736 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
737 struct rrpc_addr *p;
738 sector_t laddr = rrpc_get_laddr(bio);
739 int is_gc = flags & NVM_IOTYPE_GC;
740 int i;
741
742 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
743 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
744 return NVM_IO_REQUEUE;
745 }
746
747 for (i = 0; i < npages; i++) {
748 /* We assume that mapping occurs at 4KB granularity */
749 p = rrpc_map_page(rrpc, laddr + i, is_gc);
750 if (!p) {
751 BUG_ON(is_gc);
752 rrpc_unlock_laddr(rrpc, r);
753 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
754 rqd->dma_ppa_list);
755 rrpc_gc_kick(rrpc);
756 return NVM_IO_REQUEUE;
757 }
758
759 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
760 p->addr);
761 }
762
763 rqd->opcode = NVM_OP_HBWRITE;
764
765 return NVM_IO_OK;
766 }
767
768 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
769 struct nvm_rq *rqd, unsigned long flags)
770 {
771 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
772 struct rrpc_addr *p;
773 int is_gc = flags & NVM_IOTYPE_GC;
774 sector_t laddr = rrpc_get_laddr(bio);
775
776 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
777 return NVM_IO_REQUEUE;
778
779 p = rrpc_map_page(rrpc, laddr, is_gc);
780 if (!p) {
781 BUG_ON(is_gc);
782 rrpc_unlock_rq(rrpc, rqd);
783 rrpc_gc_kick(rrpc);
784 return NVM_IO_REQUEUE;
785 }
786
787 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
788 rqd->opcode = NVM_OP_HBWRITE;
789 rrqd->addr = p;
790
791 return NVM_IO_OK;
792 }
793
794 static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
795 struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
796 {
797 if (npages > 1) {
798 rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
799 &rqd->dma_ppa_list);
800 if (!rqd->ppa_list) {
801 pr_err("rrpc: not able to allocate ppa list\n");
802 return NVM_IO_ERR;
803 }
804
805 if (bio_rw(bio) == WRITE)
806 return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
807 npages);
808
809 return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
810 }
811
812 if (bio_rw(bio) == WRITE)
813 return rrpc_write_rq(rrpc, bio, rqd, flags);
814
815 return rrpc_read_rq(rrpc, bio, rqd, flags);
816 }
817
818 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
819 struct nvm_rq *rqd, unsigned long flags)
820 {
821 int err;
822 struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
823 uint8_t nr_pages = rrpc_get_pages(bio);
824 int bio_size = bio_sectors(bio) << 9;
825
826 if (bio_size < rrpc->dev->sec_size)
827 return NVM_IO_ERR;
828 else if (bio_size > rrpc->dev->max_rq_size)
829 return NVM_IO_ERR;
830
831 err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
832 if (err)
833 return err;
834
835 bio_get(bio);
836 rqd->bio = bio;
837 rqd->ins = &rrpc->instance;
838 rqd->nr_pages = nr_pages;
839 rrq->flags = flags;
840
841 err = nvm_submit_io(rrpc->dev, rqd);
842 if (err) {
843 pr_err("rrpc: I/O submission failed: %d\n", err);
844 return NVM_IO_ERR;
845 }
846
847 return NVM_IO_OK;
848 }
849
850 static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
851 {
852 struct rrpc *rrpc = q->queuedata;
853 struct nvm_rq *rqd;
854 int err;
855
856 if (bio->bi_rw & REQ_DISCARD) {
857 rrpc_discard(rrpc, bio);
858 return BLK_QC_T_NONE;
859 }
860
861 rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
862 if (!rqd) {
863 pr_err_ratelimited("rrpc: not able to queue bio.\n");
864 bio_io_error(bio);
865 return BLK_QC_T_NONE;
866 }
867 memset(rqd, 0, sizeof(struct nvm_rq));
868
869 err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
870 switch (err) {
871 case NVM_IO_OK:
872 return BLK_QC_T_NONE;
873 case NVM_IO_ERR:
874 bio_io_error(bio);
875 break;
876 case NVM_IO_DONE:
877 bio_endio(bio);
878 break;
879 case NVM_IO_REQUEUE:
880 spin_lock(&rrpc->bio_lock);
881 bio_list_add(&rrpc->requeue_bios, bio);
882 spin_unlock(&rrpc->bio_lock);
883 queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
884 break;
885 }
886
887 mempool_free(rqd, rrpc->rq_pool);
888 return BLK_QC_T_NONE;
889 }
890
891 static void rrpc_requeue(struct work_struct *work)
892 {
893 struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
894 struct bio_list bios;
895 struct bio *bio;
896
897 bio_list_init(&bios);
898
899 spin_lock(&rrpc->bio_lock);
900 bio_list_merge(&bios, &rrpc->requeue_bios);
901 bio_list_init(&rrpc->requeue_bios);
902 spin_unlock(&rrpc->bio_lock);
903
904 while ((bio = bio_list_pop(&bios)))
905 rrpc_make_rq(rrpc->disk->queue, bio);
906 }
907
908 static void rrpc_gc_free(struct rrpc *rrpc)
909 {
910 struct rrpc_lun *rlun;
911 int i;
912
913 if (rrpc->krqd_wq)
914 destroy_workqueue(rrpc->krqd_wq);
915
916 if (rrpc->kgc_wq)
917 destroy_workqueue(rrpc->kgc_wq);
918
919 if (!rrpc->luns)
920 return;
921
922 for (i = 0; i < rrpc->nr_luns; i++) {
923 rlun = &rrpc->luns[i];
924
925 if (!rlun->blocks)
926 break;
927 vfree(rlun->blocks);
928 }
929 }
930
931 static int rrpc_gc_init(struct rrpc *rrpc)
932 {
933 rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
934 rrpc->nr_luns);
935 if (!rrpc->krqd_wq)
936 return -ENOMEM;
937
938 rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
939 if (!rrpc->kgc_wq)
940 return -ENOMEM;
941
942 setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
943
944 return 0;
945 }
946
947 static void rrpc_map_free(struct rrpc *rrpc)
948 {
949 vfree(rrpc->rev_trans_map);
950 vfree(rrpc->trans_map);
951 }
952
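/* Callback for dev->ops->get_l2p_tbl(): seed the forward and reverse
 * translation maps from the device's L2P table, skipping unmapped entries.
 */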
953 static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
954 {
955 struct rrpc *rrpc = (struct rrpc *)private;
956 struct nvm_dev *dev = rrpc->dev;
957 struct rrpc_addr *addr = rrpc->trans_map + slba;
958 struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
959 sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
960 u64 elba = slba + nlb;
961 u64 i;
962
963 if (unlikely(elba > dev->total_pages)) {
964 pr_err("nvm: L2P data from device is out of bounds!\n");
965 return -EINVAL;
966 }
967
968 for (i = 0; i < nlb; i++) {
969 u64 pba = le64_to_cpu(entries[i]);
970 /* LNVM treats address-spaces as silos; LBA and PBA are
971  * equally large and zero-indexed.
972  */
973 if (unlikely(pba >= max_pages && pba != U64_MAX)) {
974 pr_err("nvm: L2P data entry is out of bounds!\n");
975 return -EINVAL;
976 }
977
978 /* Address zero is a special one. The first page on a disk is
979  * protected, as it often holds internal device boot
980  * information.
981  */
982 if (!pba)
983 continue;
984
985 addr[i].addr = pba;
986 raddr[pba].addr = slba + i;
987 }
988
989 return 0;
990 }
991
992 static int rrpc_map_init(struct rrpc *rrpc)
993 {
994 struct nvm_dev *dev = rrpc->dev;
995 sector_t i;
996 int ret;
997
998 rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
999 if (!rrpc->trans_map)
1000 return -ENOMEM;
1001
1002 rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
1003 * rrpc->nr_pages);
1004 if (!rrpc->rev_trans_map)
1005 return -ENOMEM;
1006
1007 for (i = 0; i < rrpc->nr_pages; i++) {
1008 struct rrpc_addr *p = &rrpc->trans_map[i];
1009 struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
1010
1011 p->addr = ADDR_EMPTY;
1012 r->addr = ADDR_EMPTY;
1013 }
1014
1015 if (!dev->ops->get_l2p_tbl)
1016 return 0;
1017
1018 /* Bring up the mapping table from device */
1019 ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
1020 rrpc_l2p_update, rrpc);
1021 if (ret) {
1022 pr_err("nvm: rrpc: could not read L2P table.\n");
1023 return -EINVAL;
1024 }
1025
1026 return 0;
1027 }
1028
1029
1030 /* Minimum pages needed within a lun */
1031 #define PAGE_POOL_SIZE 16
1032 #define ADDR_POOL_SIZE 64
1033
1034 static int rrpc_core_init(struct rrpc *rrpc)
1035 {
1036 down_write(&rrpc_lock);
1037 if (!rrpc_gcb_cache) {
1038 rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
1039 sizeof(struct rrpc_block_gc), 0, 0, NULL);
1040 if (!rrpc_gcb_cache) {
1041 up_write(&rrpc_lock);
1042 return -ENOMEM;
1043 }
1044
1045 rrpc_rq_cache = kmem_cache_create("rrpc_rq",
1046 sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
1047 0, 0, NULL);
1048 if (!rrpc_rq_cache) {
1049 kmem_cache_destroy(rrpc_gcb_cache);
1050 up_write(&rrpc_lock);
1051 return -ENOMEM;
1052 }
1053 }
1054 up_write(&rrpc_lock);
1055
1056 rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
1057 if (!rrpc->page_pool)
1058 return -ENOMEM;
1059
1060 rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
1061 rrpc_gcb_cache);
1062 if (!rrpc->gcb_pool)
1063 return -ENOMEM;
1064
1065 rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
1066 if (!rrpc->rq_pool)
1067 return -ENOMEM;
1068
1069 spin_lock_init(&rrpc->inflights.lock);
1070 INIT_LIST_HEAD(&rrpc->inflights.reqs);
1071
1072 return 0;
1073 }
1074
1075 static void rrpc_core_free(struct rrpc *rrpc)
1076 {
1077 mempool_destroy(rrpc->page_pool);
1078 mempool_destroy(rrpc->gcb_pool);
1079 mempool_destroy(rrpc->rq_pool);
1080 }
1081
1082 static void rrpc_luns_free(struct rrpc *rrpc)
1083 {
1084 kfree(rrpc->luns);
1085 }
1086
1087 static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
1088 {
1089 struct nvm_dev *dev = rrpc->dev;
1090 struct rrpc_lun *rlun;
1091 int i, j;
1092
1093 spin_lock_init(&rrpc->rev_lock);
1094
1095 rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
1096 GFP_KERNEL);
1097 if (!rrpc->luns)
1098 return -ENOMEM;
1099
1100 /* 1:1 mapping */
1101 for (i = 0; i < rrpc->nr_luns; i++) {
1102 struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
1103
1104 if (dev->pgs_per_blk >
1105 MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
1106 pr_err("rrpc: number of pages per block too high.\n");
1107 goto err;
1108 }
1109
1110 rlun = &rrpc->luns[i];
1111 rlun->rrpc = rrpc;
1112 rlun->parent = lun;
1113 INIT_LIST_HEAD(&rlun->prio_list);
1114 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
1115 spin_lock_init(&rlun->lock);
1116
1117 rrpc->total_blocks += dev->blks_per_lun;
1118 rrpc->nr_pages += dev->sec_per_lun;
1119
1120 rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
1121 rrpc->dev->blks_per_lun);
1122 if (!rlun->blocks)
1123 goto err;
1124
1125 for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
1126 struct rrpc_block *rblk = &rlun->blocks[j];
1127 struct nvm_block *blk = &lun->blocks[j];
1128
1129 rblk->parent = blk;
1130 INIT_LIST_HEAD(&rblk->prio);
1131 spin_lock_init(&rblk->lock);
1132 }
1133 }
1134
1135 return 0;
1136 err:
1137 return -ENOMEM;
1138 }
1139
1140 static void rrpc_free(struct rrpc *rrpc)
1141 {
1142 rrpc_gc_free(rrpc);
1143 rrpc_map_free(rrpc);
1144 rrpc_core_free(rrpc);
1145 rrpc_luns_free(rrpc);
1146
1147 kfree(rrpc);
1148 }
1149
1150 static void rrpc_exit(void *private)
1151 {
1152 struct rrpc *rrpc = private;
1153
1154 del_timer(&rrpc->gc_timer);
1155
1156 flush_workqueue(rrpc->krqd_wq);
1157 flush_workqueue(rrpc->kgc_wq);
1158
1159 rrpc_free(rrpc);
1160 }
1161
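/* Reserve four blocks per LUN and expose roughly 90% of the remaining pages
 * as logical sectors.
 */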
1162 static sector_t rrpc_capacity(void *private)
1163 {
1164 struct rrpc *rrpc = private;
1165 struct nvm_dev *dev = rrpc->dev;
1166 sector_t reserved, provisioned;
1167
1168 /* cur, gc, and two emergency blocks for each lun */
1169 reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
1170 provisioned = rrpc->nr_pages - reserved;
1171
1172 if (reserved > rrpc->nr_pages) {
1173 pr_err("rrpc: not enough space available to expose storage.\n");
1174 return 0;
1175 }
1176
1177 sector_div(provisioned, 10);
1178 return provisioned * 9 * NR_PHY_IN_LOG;
1179 }
1180
1181 /*
1182  * For each page in the block, look up its logical address in the reverse
1183  * translation map and check whether the forward map still points back at
1184  * this physical page; pages that no longer do are marked invalid.
1185  */
1186 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
1187 {
1188 struct nvm_dev *dev = rrpc->dev;
1189 int offset;
1190 struct rrpc_addr *laddr;
1191 u64 paddr, pladdr;
1192
1193 for (offset = 0; offset < dev->pgs_per_blk; offset++) {
1194 paddr = block_to_addr(rrpc, rblk) + offset;
1195
1196 pladdr = rrpc->rev_trans_map[paddr].addr;
1197 if (pladdr == ADDR_EMPTY)
1198 continue;
1199
1200 laddr = &rrpc->trans_map[pladdr];
1201
1202 if (paddr == laddr->addr) {
1203 laddr->rblk = rblk;
1204 } else {
1205 set_bit(offset, rblk->invalid_pages);
1206 rblk->nr_invalid_pages++;
1207 }
1208 }
1209 }
1210
1211 static int rrpc_blocks_init(struct rrpc *rrpc)
1212 {
1213 struct rrpc_lun *rlun;
1214 struct rrpc_block *rblk;
1215 int lun_iter, blk_iter;
1216
1217 for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
1218 rlun = &rrpc->luns[lun_iter];
1219
1220 for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
1221 blk_iter++) {
1222 rblk = &rlun->blocks[blk_iter];
1223 rrpc_block_map_update(rrpc, rblk);
1224 }
1225 }
1226
1227 return 0;
1228 }
1229
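/* Allocate a current write block and an emergency GC block for each LUN. */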
1230 static int rrpc_luns_configure(struct rrpc *rrpc)
1231 {
1232 struct rrpc_lun *rlun;
1233 struct rrpc_block *rblk;
1234 int i;
1235
1236 for (i = 0; i < rrpc->nr_luns; i++) {
1237 rlun = &rrpc->luns[i];
1238
1239 rblk = rrpc_get_blk(rrpc, rlun, 0);
1240 if (!rblk)
1241 goto err;
1242
1243 rrpc_set_lun_cur(rlun, rblk);
1244
1245 /* Emergency gc block */
1246 rblk = rrpc_get_blk(rrpc, rlun, 1);
1247 if (!rblk)
1248 goto err;
1249 rlun->gc_cur = rblk;
1250 }
1251
1252 return 0;
1253 err:
1254 rrpc_put_blks(rrpc);
1255 return -EINVAL;
1256 }
1257
1258 static struct nvm_tgt_type tt_rrpc;
1259
1260 static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
1261 int lun_begin, int lun_end)
1262 {
1263 struct request_queue *bqueue = dev->q;
1264 struct request_queue *tqueue = tdisk->queue;
1265 struct rrpc *rrpc;
1266 int ret;
1267
1268 if (!(dev->identity.dom & NVM_RSP_L2P)) {
1269 pr_err("nvm: rrpc: device does not support l2p (%x)\n",
1270 dev->identity.dom);
1271 return ERR_PTR(-EINVAL);
1272 }
1273
1274 rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
1275 if (!rrpc)
1276 return ERR_PTR(-ENOMEM);
1277
1278 rrpc->instance.tt = &tt_rrpc;
1279 rrpc->dev = dev;
1280 rrpc->disk = tdisk;
1281
1282 bio_list_init(&rrpc->requeue_bios);
1283 spin_lock_init(&rrpc->bio_lock);
1284 INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
1285
1286 rrpc->nr_luns = lun_end - lun_begin + 1;
1287
1288 /* simple round-robin strategy */
1289 atomic_set(&rrpc->next_lun, -1);
1290
1291 ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
1292 if (ret) {
1293 pr_err("nvm: rrpc: could not initialize luns\n");
1294 goto err;
1295 }
1296
1297 rrpc->poffset = dev->sec_per_lun * lun_begin;
1298 rrpc->lun_offset = lun_begin;
1299
1300 ret = rrpc_core_init(rrpc);
1301 if (ret) {
1302 pr_err("nvm: rrpc: could not initialize core\n");
1303 goto err;
1304 }
1305
1306 ret = rrpc_map_init(rrpc);
1307 if (ret) {
1308 pr_err("nvm: rrpc: could not initialize maps\n");
1309 goto err;
1310 }
1311
1312 ret = rrpc_blocks_init(rrpc);
1313 if (ret) {
1314 pr_err("nvm: rrpc: could not initialize state for blocks\n");
1315 goto err;
1316 }
1317
1318 ret = rrpc_luns_configure(rrpc);
1319 if (ret) {
1320 pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
1321 goto err;
1322 }
1323
1324 ret = rrpc_gc_init(rrpc);
1325 if (ret) {
1326 pr_err("nvm: rrpc: could not initialize gc\n");
1327 goto err;
1328 }
1329
1330 /* inherit the size from the underlying device */
1331 blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
1332 blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
1333
1334 pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
1335 rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
1336
1337 mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
1338
1339 return rrpc;
1340 err:
1341 rrpc_free(rrpc);
1342 return ERR_PTR(ret);
1343 }
1344
1345 /* round robin, page-based FTL, and cost-based GC */
1346 static struct nvm_tgt_type tt_rrpc = {
1347 .name = "rrpc",
1348 .version = {1, 0, 0},
1349
1350 .make_rq = rrpc_make_rq,
1351 .capacity = rrpc_capacity,
1352 .end_io = rrpc_end_io,
1353
1354 .init = rrpc_init,
1355 .exit = rrpc_exit,
1356 };
1357
1358 static int __init rrpc_module_init(void)
1359 {
1360 return nvm_register_target(&tt_rrpc);
1361 }
1362
1363 static void rrpc_module_exit(void)
1364 {
1365 nvm_unregister_target(&tt_rrpc);
1366 }
1367
1368 module_init(rrpc_module_init);
1369 module_exit(rrpc_module_exit);
1370 MODULE_LICENSE("GPL v2");
1371 MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");