/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};
/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

struct uuid_entry_v0 {
	uint8_t		uuid[16];
	uint8_t		label[32];
	u32		first_reg;
	u32		last_reg;
	u32		invalidated;
	u32		pad;
};
static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major, bcache_minor;
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
static void bio_split_pool_free(struct bio_split_pool *p)
{
	if (p->bio_split_hook)
		mempool_destroy(p->bio_split_hook);

	if (p->bio_split)
		bioset_free(p->bio_split);
}

static int bio_split_pool_init(struct bio_split_pool *p)
{
	p->bio_split = bioset_create(4, 0);
	if (!p->bio_split)
		return -ENOMEM;

	p->bio_split_hook = mempool_create_kmalloc_pool(4,
				sizeof(struct bio_split_hook));
	if (!p->bio_split_hook)
		return -ENOMEM;

	return 0;
}

/* Superblock */
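/*
 * read_super() returns NULL on success or a human-readable error string. On
 * success it also takes a reference on the page holding the on-disk
 * superblock and hands it back via @res, so callers can reuse that page for
 * later superblock writes.
 */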
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset		= le64_to_cpu(s->offset);
	sb->version		= le64_to_cpu(s->version);

	memcpy(sb->magic,	s->magic, 16);
	memcpy(sb->uuid,	s->uuid, 16);
	memcpy(sb->set_uuid,	s->set_uuid, 16);
	memcpy(sb->label,	s->label, SB_LABEL_SIZE);

	sb->flags		= le64_to_cpu(s->flags);
	sb->seq			= le64_to_cpu(s->seq);
	sb->last_mount		= le32_to_cpu(s->last_mount);
	sb->first_bucket	= le16_to_cpu(s->first_bucket);
	sb->keys		= le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size	= le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset	= BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset	= le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets	= le64_to_cpu(s->nbuckets);
		sb->block_size	= le16_to_cpu(s->block_size);
		sb->bucket_size	= le16_to_cpu(s->bucket_size);

		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}
static void write_bdev_super_endio(struct bio *bio, int error)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write.cl);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_sector	= SB_SECTOR;
	bio->bi_rw	= REQ_SYNC|REQ_META;
	bio->bi_size	= SB_SIZE;
	bch_bio_map(bio, NULL);

	out->offset		= cpu_to_le64(sb->offset);
	out->version		= cpu_to_le64(sb->version);

	memcpy(out->uuid,	sb->uuid, 16);
	memcpy(out->set_uuid,	sb->set_uuid, 16);
	memcpy(out->label,	sb->label, SB_LABEL_SIZE);

	out->flags		= cpu_to_le64(sb->flags);
	out->seq		= cpu_to_le64(sb->seq);

	out->last_mount		= cpu_to_le32(sb->last_mount);
	out->first_bucket	= cpu_to_le16(sb->first_bucket);
	out->keys		= cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(REQ_WRITE, bio);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write.cl;
	struct bio *bio = &dc->sb_bio;

	closure_lock(&dc->sb_write, parent);

	bio_reset(bio);
	bio->bi_bdev	= dc->bdev;
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return(cl);
}
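/*
 * A typical synchronous caller of this write path uses a stack closure, as
 * in bch_cached_dev_run() and cached_dev_detach_finish() below:
 *
 *	struct closure cl;
 *	closure_init_stack(&cl);
 *
 *	SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
 *	bch_write_bdev_super(dc, &cl);
 *	closure_sync(&cl);
 */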
static void write_super_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, error, "writing superblock");
	closure_put(&ca->set->sb_write.cl);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write.cl;
	struct cache *ca;
	unsigned i;

	closure_lock(&c->sb_write, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq		= c->sb.seq;
		ca->sb.last_mount	= c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio->bi_bdev	= ca->bdev;
		bio->bi_end_io	= write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return(cl);
}
static void uuid_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);

	cache_set_err_on(error, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io(struct cache_set *c, unsigned long rw,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write.cl;
	struct uuid_entry *u;
	unsigned i;
	char buf[80];

	BUG_ON(!parent);
	closure_lock(&c->uuid_write, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_rw	= REQ_SYNC|REQ_META|rw;
		bio->bi_size	= KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;
		bio->bi_private = cl;
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (!(rw & WRITE))
			break;
	}

	bch_bkey_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return(cl);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_ptr_invalid(c, 1, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, READ_SYNC, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0	*u0 = (void *) c->uuids;
		struct uuid_entry	*u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid,	u0[i].uuid, 16);
			memcpy(u1[i].label,	u0[i].label, 32);

			u1[i].first_reg		= u0[i].first_reg;
			u1[i].last_reg		= u0[i].last_reg;
			u1[i].invalidated	= u0[i].invalidated;

			u1[i].flags	= 0;
			u1[i].sectors	= 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_WRITE, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	__bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}
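/*
 * An all-zero uuid marks a free slot - uuid_find_empty() relies on that,
 * which is presumably why detached/stale devices get their slot overwritten
 * with invalid_uuid rather than simply zeroed out.
 */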
/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *   * 8 bit gen
 *   * 16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * LRU (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
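/*
 * A sketch of the on-disk chain described above (field names follow
 * struct prio_set and struct bucket_disk; see prio_io()/prio_read() below):
 *
 *	journal header           prio bucket 0          prio bucket 1
 *	prio_bucket[dev] ------> csum                   csum
 *	                         magic                  magic
 *	                         seq                    seq
 *	                         next_bucket ---------> next_bucket ----> ...
 *	                         data[]: {prio, gen}    data[]: {prio, gen}
 */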
static void prio_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(error, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_sector	= bucket * ca->sb.bucket_size;
	bio->bi_bdev	= ca->bdev;
	bio->bi_rw	= REQ_SYNC|REQ_META|rw;
	bio->bi_size	= bucket_bytes(ca);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio, ca);
	closure_sync(cl);
}

#define buckets_free(c)	"free %zu, free_inc %zu, unused %zu", \
	fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets; b++)
		b->disk_gen = b->gen;

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket	= ca->prio_buckets[i + 1];
		p->magic	= pset_magic(ca);
		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_WRITE);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	ca->need_save_prio = 0;

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++)
		ca->prio_last_buckets[i] = ca->prio_buckets[i];
}
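/*
 * prio_read() walks the same chain bch_prio_write() builds: it starts from
 * the bucket number recorded in the journal header and follows
 * p->next_bucket whenever it exhausts the prios in the current bucket,
 * restoring each in-memory bucket's prio and gen as it goes.
 */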
static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, READ_SYNC);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(ca))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
	}
}
/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (atomic_read(&d->closing))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!atomic_xchg(&d->closing, 1))
		closure_queue(&d->cl);
}
static void bcache_device_unlink(struct bcache_device *d)
{
	unsigned i;
	struct cache *ca;

	sysfs_remove_link(&d->c->kobj, d->name);
	sysfs_remove_link(&d->kobj, "cache");

	for_each_cache(ca, d->c, i)
		bd_unlink_disk_holder(ca->bdev, d->disk);
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");
}
static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&d->detaching)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);

		atomic_set(&d->detaching, 0);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));

	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}
static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk)
		put_disk(d->disk);

	bio_split_pool_free(&d->bio_split_hook);
	if (d->unaligned_bvec)
		mempool_destroy(d->unaligned_bvec);
	if (d->bio_split)
		bioset_free(d->bio_split);

	closure_debug_destroy(&d->cl);
}
static int bcache_device_init(struct bcache_device *d, unsigned block_size)
{
	struct request_queue *q;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
	    bio_split_pool_init(&d->bio_split_hook) ||
	    !(d->disk = alloc_disk(1)) ||
	    !(q = blk_alloc_queue(GFP_KERNEL)))
		return -ENOMEM;

	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= bcache_minor++;
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	blk_queue_make_request(q, NULL);
	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info.congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	q->limits.max_discard_sectors	= UINT_MAX;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	set_bit(QUEUE_FLAG_NONROT,	&d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD,	&d->disk->queue->queue_flags);

	return 0;
}
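/*
 * Note that bcache_device_init() flags the virtual device non-rotational
 * and discard-capable unconditionally, whatever the underlying devices
 * actually support.
 */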
/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}
void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;

	if (atomic_xchg(&dc->running, 1))
		return;

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
#if 0
	char *env[] = { "SYMLINK=label" , NULL };
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
#endif
	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}
static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!atomic_read(&dc->disk.detaching));
	BUG_ON(atomic_read(&dc->count));

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&dc->disk.closing))
		return;

	if (atomic_xchg(&dc->disk.detaching, 1))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	 */

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}
void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev)) {
		if (dc->bdev->bd_disk)
			blk_sync_queue(bdev_get_queue(dc->bdev));

		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	closure_init_unlocked(&dc->sb_write);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_merge		= true;
	dc->sequential_cutoff		= 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	ret = bcache_device_init(&dc->disk, block_size);
	if (ret)
		return ret;

	set_capacity(dc->disk.disk,
		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

	dc->disk.disk->queue->backing_dev_info.ra_pages =
		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
		    q->backing_dev_info.ra_pages);

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}
/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
				 struct block_device *bdev,
				 struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio);
	dc->sb_bio.bi_max_vecs	= 1;
	dc->sb_bio.bi_io_vec	= dc->sb_bio.bi_inline_vecs;
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", bdevname(bdev, name));

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	bcache_device_stop(&dc->disk);
}
/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	bcache_device_free(d);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	bcache_device_unlink(d);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c)))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	set_capacity(d->disk, u->sectors);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}
static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}
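/*
 * Flash-only volumes are created at runtime from sysfs (the cache set's
 * flash_vol_create attribute, in sysfs.c, ends up here) and recreated at
 * registration time by flash_devs_run() above.
 */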
/* Cache set */

bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}
static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca)
			kobject_put(&ca->kobj);

	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));

	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->fill_iter)
		mempool_destroy(c->fill_iter);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}
static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct btree *b;

	/* Shut down allocator threads */
	set_bit(CACHE_SET_STOPPING_2, &c->flags);
	wake_up_allocators(c);

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list)
		if (btree_node_dirty(b))
			bch_btree_node_write(b, NULL);

	closure_return(cl);
}
static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc, *t;
	size_t i;

	mutex_lock(&bch_register_lock);

	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			bch_cached_dev_detach(dc);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
			bcache_device_stop(c->devices[i]);

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}
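/*
 * Teardown is a chain of closures: bch_cache_set_stop() queues c->caching,
 * which runs __cache_set_unregister(), which continues into
 * cache_set_flush(); dropping the last reference on c->cl then runs
 * cache_set_free() (see the set_closure_fn() calls in bch_cache_set_alloc()
 * below).
 */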
#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size	= sb->block_size;
	c->sb.bucket_size	= sb->bucket_size;
	c->sb.nr_in_set		= sb->nr_in_set;
	c->sb.last_mount	= sb->last_mount;
	c->bucket_bits		= ilog2(sb->bucket_size);
	c->block_bits		= ilog2(sb->block_size);
	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages		= c->sb.bucket_size / PAGE_SECTORS;
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	c->sort_crit_factor = int_sqrt(c->btree_pages);

	mutex_init(&c->bucket_lock);
	mutex_init(&c->sort_lock);
	spin_lock_init(&c->sort_time_lock);
	closure_init_unlocked(&c->sb_write);
	closure_init_unlocked(&c->uuid_write);
	spin_lock_init(&c->btree_read_time_lock);
	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c))
		goto err;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}
static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	unsigned i;

	struct btree_op op;
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal, &op))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_ptr_invalid(c, j->btree_level + 1, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, k, j->btree_level, &op);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &op.cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c, &op))
			goto err;

		bch_journal_mark(c, &journal);
		bch_btree_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal, &op);
	} else {
		pr_notice("invalidating existing data");
		/* Don't want invalidate_buckets() to queue a gc yet */
		closure_lock(&c->gc, NULL);

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_btree_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err_unlock_gc;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err_unlock_gc;

		err = "cannot allocate new btree root";
		c->root = bch_btree_node_alloc(c, 0, &op.cl);
		if (IS_ERR_OR_NULL(c->root))
			goto err_unlock_gc;

		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &op.cl);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &op.cl);

		/* Unlock */
		closure_set_stopped(&c->gc.cl);
		closure_put(&c->gc.cl);
	}

	closure_sync(&op.cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	return;
err_unlock_gc:
	closure_set_stopped(&c->gc.cl);
	closure_put(&c->gc.cl);
err:
	closure_sync(&op.cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, err);
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size	== c->sb.block_size &&
	       ca->sb.bucket_size	== c->sb.bucket_size &&
	       ca->sb.nr_in_set		== c->sb.nr_in_set;
}
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version		= ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags		= ca->sb.flags;
		c->sb.seq		= ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}
/* Cache device */

void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (ca->set)
		ca->set->cache[ca->sb.nr_this_dev] = NULL;

	bch_cache_allocator_exit(ca);

	bio_split_pool_free(&ca->bio_split_hook);

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->unused);
	free_fifo(&ca->free_inc);
	free_fifo(&ca->free);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev)) {
		blk_sync_queue(bdev_get_queue(ca->bdev));
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	kfree(ca);
	module_put(THIS_MODULE);
}
static int cache_alloc(struct cache_sb *sb, struct cache *ca)
{
	size_t free;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	INIT_LIST_HEAD(&ca->discards);

	bio_init(&ca->journal.bio);
	ca->journal.bio.bi_max_vecs = 8;
	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
	free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);

	if (!init_fifo(&ca->free,	free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc,	free << 2, GFP_KERNEL) ||
	    !init_fifo(&ca->unused,	free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap,	free << 3, GFP_KERNEL) ||
	    !(ca->buckets	= vzalloc(sizeof(struct bucket) *
					  ca->sb.nbuckets)) ||
	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					  2, GFP_KERNEL)) ||
	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)) ||
	    bio_split_pool_init(&ca->bio_split_hook))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	if (bch_cache_allocator_init(ca))
		goto err;

	return 0;
err:
	kobject_put(&ca->kobj);
	return -ENOMEM;
}
static void register_cache(struct cache_sb *sb, struct page *sb_page,
				  struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio);
	ca->sb_bio.bi_max_vecs	= 1;
	ca->sb_bio.bi_io_vec	= ca->sb_bio.bi_inline_vecs;
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	if (cache_alloc(sb, ca) != 0)
		goto err;

	err = "error creating kobject";
	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
		goto err;

	err = register_cache_set(ca);
	if (err)
		goto err;

	pr_info("registered cache device %s", bdevname(bdev, name));
	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	kobject_put(&ca->kobj);
}
/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);

static bool bch_is_open_backing(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev) {
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev) {
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	mutex_lock(&bch_register_lock);

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		register_bdev(sb, sb_page, bdev, dc);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		register_cache(sb, sb_page, bdev, ca);
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	mutex_unlock(&bch_register_lock);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	if (attr != &ksysfs_register_quiet)
		pr_info("error opening %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}
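/*
 * Registration is driven entirely from userspace; a minimal example,
 * assuming the superblocks were written by the make-bcache userspace tool:
 *
 *	echo /dev/sdc > /sys/fs/bcache/register	# cache device
 *	echo /dev/sdb > /sys/fs/bcache/register	# backing device
 *
 * register_quiet behaves identically but suppresses the error message on
 * failure (the attr != &ksysfs_register_quiet check above).
 */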
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_writeback_exit();
	bch_request_exit();
	bch_btree_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0)
		return bcache_major;

	if (!(bcache_wq = create_workqueue("bcache")) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    sysfs_create_files(bcache_kobj, files) ||
	    bch_btree_init() ||
	    bch_request_init() ||
	    bch_writeback_init() ||
	    bch_debug_init(bcache_kobj))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);