diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 2103e97a974f6e3598f38bceea3852577a52f1b9..37fcaadbf80ca05dd22d9a018837ea3974d21a8e 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -48,7 +48,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
 }
 
 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
-                                                               unsigned len)
+                                                       unsigned int len)
 {
        sector_t i;
 
@@ -96,10 +96,13 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
        sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
        struct nvm_rq *rqd;
 
-       do {
+       while (1) {
                rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
+               if (rqd)
+                       break;
+
                schedule();
-       } while (!rqd);
+       }
 
        if (IS_ERR(rqd)) {
                pr_err("rrpc: unable to acquire inflight IO\n");
@@ -172,39 +175,32 @@ static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 }
 
 /* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
+static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
+                                               struct rrpc_block **cur_rblk)
 {
        struct rrpc *rrpc = rlun->rrpc;
 
-       BUG_ON(!rblk);
-
-       if (rlun->cur) {
-               spin_lock(&rlun->cur->lock);
-               WARN_ON(!block_is_full(rrpc, rlun->cur));
-               spin_unlock(&rlun->cur->lock);
+       if (*cur_rblk) {
+               spin_lock(&(*cur_rblk)->lock);
+               WARN_ON(!block_is_full(rrpc, *cur_rblk));
+               spin_unlock(&(*cur_rblk)->lock);
        }
-       rlun->cur = rblk;
+       *cur_rblk = new_rblk;
 }
 
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
                                                        unsigned long flags)
 {
-       struct nvm_lun *lun = rlun->parent;
        struct nvm_block *blk;
        struct rrpc_block *rblk;
 
-       spin_lock(&lun->lock);
-       blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
+       blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
        if (!blk) {
                pr_err("nvm: rrpc: cannot get new block from media manager\n");
-               spin_unlock(&lun->lock);
                return NULL;
        }
 
        rblk = rrpc_get_rblk(rlun, blk->id);
-       list_add_tail(&rblk->list, &rlun->open_list);
-       spin_unlock(&lun->lock);
-
        blk->priv = rblk;
        bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
        rblk->next_page = 0;
@@ -216,13 +212,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 
 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-       struct rrpc_lun *rlun = rblk->rlun;
-       struct nvm_lun *lun = rlun->parent;
-
-       spin_lock(&lun->lock);
-       nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
-       list_del(&rblk->list);
-       spin_unlock(&lun->lock);
+       nvm_put_blk(rrpc->dev, rblk->parent);
 }
 
 static void rrpc_put_blks(struct rrpc *rrpc)
@@ -342,7 +332,7 @@ try:
 
                /* Perform read to do GC */
                bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
-               bio->bi_rw = READ;
+               bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bio->bi_private = &wait;
                bio->bi_end_io = rrpc_end_sync_bio;
 
@@ -364,7 +354,7 @@ try:
                reinit_completion(&wait);
 
                bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
-               bio->bi_rw = WRITE;
+               bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                bio->bi_private = &wait;
                bio->bi_end_io = rrpc_end_sync_bio;
 
@@ -508,21 +498,11 @@ static void rrpc_gc_queue(struct work_struct *work)
        struct rrpc *rrpc = gcb->rrpc;
        struct rrpc_block *rblk = gcb->rblk;
        struct rrpc_lun *rlun = rblk->rlun;
-       struct nvm_lun *lun = rblk->parent->lun;
-       struct nvm_block *blk = rblk->parent;
 
        spin_lock(&rlun->lock);
        list_add_tail(&rblk->prio, &rlun->prio_list);
        spin_unlock(&rlun->lock);
 
-       spin_lock(&lun->lock);
-       lun->nr_open_blocks--;
-       lun->nr_closed_blocks++;
-       blk->state &= ~NVM_BLK_ST_OPEN;
-       blk->state |= NVM_BLK_ST_CLOSED;
-       list_move_tail(&rblk->list, &rlun->closed_list);
-       spin_unlock(&lun->lock);
-
        mempool_free(gcb, rrpc->gcb_pool);
        pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
                                                        rblk->parent->id);
@@ -596,21 +576,20 @@ out:
        return addr;
 }
 
-/* Simple round-robin Logical to physical address translation.
- *
- * Retrieve the mapping using the active append point. Then update the ap for
- * the next write to the disk.
+/* Map a logical address to a physical page. The mapping implements a round-robin
+ * approach and allocates a page from the next available lun.
  *
- * Returns rrpc_addr with the physical address and block. Remember to return to
- * rrpc->addr_cache when request is finished.
+ * Returns rrpc_addr with the physical address and block. Returns NULL if no
+ * blocks in the next rlun are available.
  */
 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
                                                                int is_gc)
 {
        struct rrpc_lun *rlun;
-       struct rrpc_block *rblk;
+       struct rrpc_block *rblk, **cur_rblk;
        struct nvm_lun *lun;
        u64 paddr;
+       int gc_force = 0;
 
        rlun = rrpc_get_lun_rr(rrpc, is_gc);
        lun = rlun->parent;
@@ -618,41 +597,65 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
        if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
                return NULL;
 
-       spin_lock(&rlun->lock);
+       /*
+        * page allocation steps:
+        * 1. Try to allocate a new page from the current rblk
+        * 2a. If that succeeds, proceed to map it in and return
+        * 2b. If it fails, first try to allocate a new block from the media
+        *     manager, and then retry step 1. Retry until the normal block
+        *     pool is exhausted.
+        * 3. If exhausted, and the garbage collector is requesting the block,
+        *    go to the reserved block and retry step 1.
+        *    If this fails as well, or it is not GC that is requesting,
+        *    report that a block could not be retrieved and let the caller
+        *    handle further processing.
+        */
 
+       spin_lock(&rlun->lock);
+       cur_rblk = &rlun->cur;
        rblk = rlun->cur;
 retry:
        paddr = rrpc_alloc_addr(rrpc, rblk);
 
-       if (paddr == ADDR_EMPTY) {
-               rblk = rrpc_get_blk(rrpc, rlun, 0);
-               if (rblk) {
-                       rrpc_set_lun_cur(rlun, rblk);
-                       goto retry;
-               }
+       if (paddr != ADDR_EMPTY)
+               goto done;
 
-               if (is_gc) {
-                       /* retry from emergency gc block */
-                       paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
-                       if (paddr == ADDR_EMPTY) {
-                               rblk = rrpc_get_blk(rrpc, rlun, 1);
-                               if (!rblk) {
-                                       pr_err("rrpc: no more blocks");
-                                       goto err;
-                               }
-
-                               rlun->gc_cur = rblk;
-                               paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
-                       }
-                       rblk = rlun->gc_cur;
-               }
+       if (!list_empty(&rlun->wblk_list)) {
+new_blk:
+               rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
+                                                                       prio);
+               rrpc_set_lun_cur(rlun, rblk, cur_rblk);
+               list_del(&rblk->prio);
+               goto retry;
+       }
+       spin_unlock(&rlun->lock);
+
+       rblk = rrpc_get_blk(rrpc, rlun, gc_force);
+       if (rblk) {
+               spin_lock(&rlun->lock);
+               list_add_tail(&rblk->prio, &rlun->wblk_list);
+               /*
+                * Another thread might already have added a new block;
+                * if so, make sure that block is used instead of the one
+                * just added.
+                */
+               goto new_blk;
        }
 
+       if (unlikely(is_gc) && !gc_force) {
+               /* retry from emergency gc block */
+               cur_rblk = &rlun->gc_cur;
+               rblk = rlun->gc_cur;
+               gc_force = 1;
+               spin_lock(&rlun->lock);
+               goto retry;
+       }
+
+       pr_err("rrpc: failed to allocate new block\n");
+       return NULL;
+done:
        spin_unlock(&rlun->lock);
        return rrpc_update_map(rrpc, laddr, rblk, paddr);
-err:
-       spin_unlock(&rlun->lock);
-       return NULL;
 }
 
 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -850,14 +853,14 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
                        return NVM_IO_ERR;
                }
 
-               if (bio_rw(bio) == WRITE)
+               if (bio_op(bio) == REQ_OP_WRITE)
                        return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
                                                                        npages);
 
                return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
        }
 
-       if (bio_rw(bio) == WRITE)
+       if (bio_op(bio) == REQ_OP_WRITE)
                return rrpc_write_rq(rrpc, bio, rqd, flags);
 
        return rrpc_read_rq(rrpc, bio, rqd, flags);
@@ -908,7 +911,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
        struct nvm_rq *rqd;
        int err;
 
-       if (bio->bi_rw & REQ_DISCARD) {
+       if (bio_op(bio) == REQ_OP_DISCARD) {
                rrpc_discard(rrpc, bio);
                return BLK_QC_T_NONE;
        }
@@ -1196,8 +1199,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
                rlun->rrpc = rrpc;
                INIT_LIST_HEAD(&rlun->prio_list);
-               INIT_LIST_HEAD(&rlun->open_list);
-               INIT_LIST_HEAD(&rlun->closed_list);
+               INIT_LIST_HEAD(&rlun->wblk_list);
 
                INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
                spin_lock_init(&rlun->lock);
@@ -1338,14 +1340,13 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
                rblk = rrpc_get_blk(rrpc, rlun, 0);
                if (!rblk)
                        goto err;
-
-               rrpc_set_lun_cur(rlun, rblk);
+               rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
 
                /* Emergency gc block */
                rblk = rrpc_get_blk(rrpc, rlun, 1);
                if (!rblk)
                        goto err;
-               rlun->gc_cur = rblk;
+               rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
        }
 
        return 0;
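
The hunks above rework rrpc_map_page() around a retry loop: try the current block, fall back to a block already queued on wblk_list, ask the media manager for a fresh block, and only then (for GC writes) dip into the reserved gc_cur block. The following is a minimal, user-space sketch of that control flow only; struct blk, alloc_page_in(), mm_get_block() and map_page() are illustrative stand-ins rather than kernel or lightnvm APIs, and all locking is omitted.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct blk {
        int free_pages;
};

struct lun {
        struct blk *cur;       /* current write block */
        struct blk *gc_cur;    /* block reserved for GC writes */
        struct blk *wblk_list; /* at most one pending block; stands in for rlun->wblk_list */
};

/* Hand out page offsets from a block until it is full (-1 plays ADDR_EMPTY). */
static long alloc_page_in(struct blk *b)
{
        if (!b || b->free_pages == 0)
                return -1;
        return --b->free_pages;
}

/* Pretend media manager: a small normal pool plus one reserved block. */
static struct blk *mm_get_block(bool gc_force)
{
        static struct blk pool[4] = { {2}, {2}, {2}, {2} };
        static struct blk reserved = {2};
        static int used, reserved_used;

        if (used < 4)
                return &pool[used++];
        if (gc_force && !reserved_used) {
                reserved_used = 1;
                return &reserved;
        }
        return NULL;
}

/* Control flow of the reworked rrpc_map_page(), minus locking and mapping. */
static long map_page(struct lun *lun, bool is_gc)
{
        struct blk **cur = &lun->cur;
        struct blk *b = lun->cur;
        bool gc_force = false;
        long paddr;

retry:
        paddr = alloc_page_in(b);
        if (paddr >= 0)
                return paddr;                   /* step 2a: page found in current block */

        if (lun->wblk_list) {                   /* step 2b: use a block already queued */
new_blk:
                b = lun->wblk_list;
                lun->wblk_list = NULL;
                *cur = b;
                goto retry;
        }

        b = mm_get_block(gc_force);             /* step 2b: ask the media manager */
        if (b) {
                lun->wblk_list = b;
                goto new_blk;
        }

        if (is_gc && !gc_force) {               /* step 3: fall back to the reserved block */
                cur = &lun->gc_cur;
                b = lun->gc_cur;
                gc_force = true;
                goto retry;
        }

        return -1;                              /* caller handles the allocation failure */
}

int main(void)
{
        struct lun lun = {
                .cur = mm_get_block(false),
                .gc_cur = mm_get_block(false),
        };

        for (int i = 0; i < 12; i++)
                printf("write %2d (%s) -> paddr %ld\n",
                       i, i >= 8 ? "gc" : "user", map_page(&lun, i >= 8));
        return 0;
}

Running the sketch shows user writes draining the normal pool, failing once it is exhausted, and GC writes still succeeding via the reserved block, which mirrors steps 1-3 of the comment added in rrpc_map_page().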