bcache: Initialize sectors_dirty when attaching
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f88e2b653a3fc9c82a7b308c8988cd10eda0c96b..dbfa1c38e85e331a3ecd9e3d1f5985158c9b1abf 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -11,6 +11,7 @@
 #include "debug.h"
 #include "request.h"
 
+#include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/debugfs.h>
 #include <linux/genhd.h>
@@ -342,6 +343,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
        struct closure *cl = &c->uuid_write.cl;
        struct uuid_entry *u;
        unsigned i;
+       char buf[80];
 
        BUG_ON(!parent);
        closure_lock(&c->uuid_write, parent);
@@ -362,8 +364,8 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                        break;
        }
 
-       pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
-                pkey(&c->uuid_bucket));
+       bch_bkey_to_text(buf, sizeof(buf), k);
+       pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
 
        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
                if (!bch_is_zero(u->uuid, 16))
@@ -543,7 +545,6 @@ void bch_prio_write(struct cache *ca)
 
        pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
                 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
-       blktrace_msg(ca, "Starting priorities: " buckets_free(ca));
 
        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
@@ -960,6 +961,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
        atomic_set(&dc->count, 1);
 
        if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+               bch_sectors_dirty_init(dc);
                atomic_set(&dc->has_dirty, 1);
                atomic_inc(&dc->count);
                bch_writeback_queue(dc);
@@ -1255,9 +1257,10 @@ static void cache_set_free(struct closure *cl)
        free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
        free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
 
-       kfree(c->fill_iter);
        if (c->bio_split)
                bioset_free(c->bio_split);
+       if (c->fill_iter)
+               mempool_destroy(c->fill_iter);
        if (c->bio_meta)
                mempool_destroy(c->bio_meta);
        if (c->search)
@@ -1282,7 +1285,7 @@ static void cache_set_flush(struct closure *cl)
 
        /* Shut down allocator threads */
        set_bit(CACHE_SET_STOPPING_2, &c->flags);
-       wake_up(&c->alloc_wait);
+       wake_up_allocators(c);
 
        bch_cache_accounting_destroy(&c->accounting);
 
@@ -1295,7 +1298,7 @@ static void cache_set_flush(struct closure *cl)
        /* Should skip this if we're unregistering because of an error */
        list_for_each_entry(b, &c->btree_cache, list)
                if (btree_node_dirty(b))
-                       bch_btree_write(b, true, NULL);
+                       bch_btree_node_write(b, NULL);
 
        closure_return(cl);
 }
@@ -1373,9 +1376,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
                c->btree_pages = max_t(int, c->btree_pages / 4,
                                       BTREE_MAX_PAGES);
 
-       init_waitqueue_head(&c->alloc_wait);
+       c->sort_crit_factor = int_sqrt(c->btree_pages);
+
        mutex_init(&c->bucket_lock);
-       mutex_init(&c->fill_lock);
        mutex_init(&c->sort_lock);
        spin_lock_init(&c->sort_time_lock);
        closure_init_unlocked(&c->sb_write);
@@ -1401,8 +1404,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
            !(c->bio_meta = mempool_create_kmalloc_pool(2,
                                sizeof(struct bbio) + sizeof(struct bio_vec) *
                                bucket_pages(c))) ||
+           !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
            !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
            !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
            !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
            bch_journal_alloc(c) ||
@@ -1410,8 +1413,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
            bch_open_buckets_alloc(c))
                goto err;
 
-       c->fill_iter->size = sb->bucket_size / sb->block_size;
-
        c->congested_read_threshold_us  = 2000;
        c->congested_write_threshold_us = 20000;
        c->error_limit  = 8 << IO_ERROR_SHIFT;
@@ -1496,9 +1497,10 @@ static void run_cache_set(struct cache_set *c)
                 */
                bch_journal_next(&c->journal);
 
+               err = "error starting allocator thread";
                for_each_cache(ca, c, i)
-                       closure_call(&ca->alloc, bch_allocator_thread,
-                                    system_wq, &c->cl);
+                       if (bch_cache_allocator_start(ca))
+                               goto err;
 
                /*
                 * First place it's safe to allocate: btree_check() and
@@ -1531,17 +1533,16 @@ static void run_cache_set(struct cache_set *c)
 
                bch_btree_gc_finish(c);
 
+               err = "error starting allocator thread";
                for_each_cache(ca, c, i)
-                       closure_call(&ca->alloc, bch_allocator_thread,
-                                    ca->alloc_workqueue, &c->cl);
+                       if (bch_cache_allocator_start(ca))
+                               goto err;
 
                mutex_lock(&c->bucket_lock);
                for_each_cache(ca, c, i)
                        bch_prio_write(ca);
                mutex_unlock(&c->bucket_lock);
 
-               wake_up(&c->alloc_wait);
-
                err = "cannot allocate new UUID bucket";
                if (__uuid_write(c))
                        goto err_unlock_gc;
@@ -1552,7 +1553,7 @@ static void run_cache_set(struct cache_set *c)
                        goto err_unlock_gc;
 
                bkey_copy_key(&c->root->key, &MAX_KEY);
-               bch_btree_write(c->root, true, &op);
+               bch_btree_node_write(c->root, &op.cl);
 
                bch_btree_set_root(c->root);
                rw_unlock(true, c->root);
@@ -1673,9 +1674,6 @@ void bch_cache_release(struct kobject *kobj)
 
        bio_split_pool_free(&ca->bio_split_hook);
 
-       if (ca->alloc_workqueue)
-               destroy_workqueue(ca->alloc_workqueue);
-
        free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
        kfree(ca->prio_buckets);
        vfree(ca->buckets);
@@ -1723,7 +1721,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
            !(ca->prio_buckets  = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
                                          2, GFP_KERNEL)) ||
            !(ca->disk_buckets  = alloc_bucket_pages(GFP_KERNEL, ca)) ||
-           !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
            bio_split_pool_init(&ca->bio_split_hook))
                return -ENOMEM;
 
@@ -1786,6 +1783,36 @@ static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
 kobj_attribute_write(register,         register_bcache);
 kobj_attribute_write(register_quiet,   register_bcache);
 
+static bool bch_is_open_backing(struct block_device *bdev) {
+       struct cache_set *c, *tc;
+       struct cached_dev *dc, *t;
+
+       list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+               list_for_each_entry_safe(dc, t, &c->cached_devs, list)
+                       if (dc->bdev == bdev)
+                               return true;
+       list_for_each_entry_safe(dc, t, &uncached_devices, list)
+               if (dc->bdev == bdev)
+                       return true;
+       return false;
+}
+
+static bool bch_is_open_cache(struct block_device *bdev) {
+       struct cache_set *c, *tc;
+       struct cache *ca;
+       unsigned i;
+
+       list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+               for_each_cache(ca, c, i)
+                       if (ca->bdev == bdev)
+                               return true;
+       return false;
+}
+
+static bool bch_is_open(struct block_device *bdev) {
+       return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
+}
+
 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                               const char *buffer, size_t size)
 {
@@ -1810,8 +1837,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                                  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                  sb);
        if (IS_ERR(bdev)) {
-               if (bdev == ERR_PTR(-EBUSY))
-                       err = "device busy";
+               if (bdev == ERR_PTR(-EBUSY)) {
+                       bdev = lookup_bdev(strim(path));
+                       if (!IS_ERR(bdev) && bch_is_open(bdev))
+                               err = "device already registered";
+                       else
+                               err = "device busy";
+               }
                goto err;
        }
 
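Note on the attach-path hunk (bch_cached_dev_attach, around line 960): the one-line addition places bch_sectors_dirty_init(dc) ahead of setting dc->has_dirty and calling bch_writeback_queue(dc), so the per-device dirty-sector count is populated before writeback is kicked off. The sketch below is a minimal userspace model of that ordering only; the names (model_dev, load_dirty_sectors, queue_writeback) are invented for illustration and are not the bcache kernel API.

/*
 * Minimal userspace model of the attach ordering in the hunk above.
 * Invented names; this is not kernel code.
 */
#include <stdio.h>

struct model_dev {
	int state_dirty;     /* models BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY */
	long sectors_dirty;  /* models the per-device dirty sector count */
	int has_dirty;       /* models dc->has_dirty */
};

/* Models bch_sectors_dirty_init(): recover the dirty count before use. */
static void load_dirty_sectors(struct model_dev *d)
{
	d->sectors_dirty = 4096;  /* pretend this was read from on-disk state */
}

/* Models bch_writeback_queue(): writeback decisions see the count. */
static void queue_writeback(struct model_dev *d)
{
	printf("writeback queued, sectors_dirty=%ld\n", d->sectors_dirty);
}

static void attach(struct model_dev *d)
{
	if (d->state_dirty) {
		load_dirty_sectors(d);  /* the added step: initialize first */
		d->has_dirty = 1;
		queue_writeback(d);
	}
}

int main(void)
{
	struct model_dev d = { .state_dirty = 1 };

	attach(&d);
	return 0;
}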