/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};
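/*
 * The declarations below rely on the helpers in bcache's "sysfs.h":
 * write_attribute(n), read_attribute(n) and rw_attribute(n) each declare
 * a static struct attribute named sysfs_<n>, with mode S_IWUSR, S_IRUGO,
 * or S_IRUGO|S_IWUSR respectively (a summary of those macros, not
 * repeated here).
 */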
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);
sysfs_time_stats_attribute(try_harder,	ms,  us);
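/*
 * sysfs_time_stats_attribute() expands to a group of read_attribute()
 * declarations for one struct time_stats (average frequency/duration,
 * max duration, time since last) in the stated units; the exact
 * attribute names come from the macro definition in "sysfs.h".
 */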
read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(sequential_merge);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum,	"%i", dc->disk.data_csum);
	var_printf(verify,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_print(writeback_rate,	dc->writeback_rate.rate);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_d_smooth);

	if (attr == &sysfs_writeback_rate_debug) {
		char dirty[20];
		char derivative[20];
		char target[20];
		bch_hprint(dirty,
			   bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);

		return sprintf(buf,
			       "rate:\t\t%u\n"
			       "change:\t\t%i\n"
			       "dirty:\t\t%s\n"
			       "derivative:\t%s\n"
			       "target:\t\t%s\n",
			       dc->writeback_rate.rate,
			       dc->writeback_rate_change,
			       dirty, derivative, target);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	var_printf(sequential_merge,	"%i");
	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
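/*
 * SHOW()/STORE() (from "sysfs.h") expand to the kobject show/store
 * method signatures, and the *_LOCKED variants wrap the __-prefixed
 * implementation with bch_register_lock. Helpers such as sysfs_print(),
 * var_print() and sysfs_hprint() compare attr against the matching
 * sysfs_<name> attribute and return the formatted value on a match,
 * which is why these method bodies read as flat lists.
 */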
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	unsigned v = size;
	struct cache_set *c;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);
	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, 1000000);
	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	d_strtoul(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	d_strtoul(writeback_rate_p_term_inverse);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
	d_strtoul(writeback_rate_d_smooth);

	d_strtoul(sequential_merge);
	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_label) {
		memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
	}

	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		size = v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
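/*
 * Example usage from userspace, for a backing device that came up as
 * bcache0 (the device name is illustrative):
 *   echo writeback > /sys/block/bcache0/bcache/cache_mode
 *   echo 10        > /sys/block/bcache0/bcache/writeback_percent
 */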
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_d_smooth,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_sequential_cutoff,
	&sysfs_sequential_merge,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
#endif
	NULL
};
KTYPE(bch_cached_dev);
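/*
 * KTYPE(name) (also from "sysfs.h") emits the struct kobj_type that
 * wires name_files up as the default attributes and name_show/
 * name_store as the sysfs_ops; the attribute arrays in this file are
 * what actually appears in each sysfs directory.
 */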
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		atomic_set(&d->detaching, 1);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)
static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
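/*
 * Example: create a flash-only volume on a cache set, then grow it
 * through the volume's own sysfs directory (uuid, device name and
 * sizes are illustrative):
 *   echo 100G > /sys/fs/bcache/<set-uuid>/flash_vol_create
 *   echo 120G > /sys/block/bcache1/bcache/size
 */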
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

		goto lock_root;

		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}

	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}

	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}
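	/*
	 * The helpers above are GCC nested functions (a GNU C extension):
	 * they are local to this show method and touch nothing beyond the
	 * cache_set passed to them.
	 */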
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		root_usage(c));

	sysfs_hprint(btree_cache_size,		cache_size(c));
	sysfs_print(btree_cache_max_chain,	cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);

	sysfs_print(btree_used_percent,	btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(dirty_data,	c->gc_stats.dirty);
	sysfs_hprint(average_key_size,	average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		bch_queue_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.shrink(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
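/*
 * Example cache set tuning from userspace (the uuid is illustrative):
 *   echo 1    > /sys/fs/bcache/<set-uuid>/synchronous
 *   echo 2000 > /sys/fs/bcache/<set-uuid>/congested_read_threshold_us
 */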
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}
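/*
 * The internal kobject is embedded in struct cache_set alongside the
 * main kobject; these two thunks just redirect to the main show/store
 * methods, so the files under internal/ share one implementation.
 */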
static void bch_cache_set_internal_release(struct kobject *k)
{
}
static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,
	&sysfs_dirty_data,

	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);
static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);
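/*
 * sysfs_time_stats_attribute_list() is the companion to
 * sysfs_time_stats_attribute(): it expands to the matching
 * &sysfs_<name>_* attribute pointers, supplying its own trailing
 * commas, which is why the entries above carry none.
 */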
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++;

		btree = cached - p;
		n -= btree;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:\t\t%zu%%\n"
				"Metadata:\t%zu%%\n"
				"Average:\t%llu\n"
				"Sectors per Q:\t%zu\n"
				"Quantiles:\t[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				btree * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
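/*
 * Example priority_stats output (all values illustrative):
 *   Unused:         2%
 *   Metadata:       1%
 *   Average:        501
 *   Sectors per Q:  64
 *   Quantiles:      [492 498 ... 510]
 */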
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)
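/*
 * Example per-cache-device tuning (paths are illustrative; caches
 * appear as cache<N> under their cache set's directory):
 *   echo 1  > /sys/fs/bcache/<set-uuid>/cache0/discard
 *   echo 10 > /sys/fs/bcache/<set-uuid>/cache0/freelist_percent
 */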
static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);