/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */

struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct list_head write_list;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	struct stack_trace stack_trace;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])
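
/*
 * For example, a client using 1 KiB blocks on a system with 4 KiB pages
 * (an illustrative configuration) has blocks_per_page_bits == 2 and so
 * maps to dm_bufio_caches[1].
 */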

#define dm_bufio_in_request()	(!!current->bio_list)
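
/*
 * current->bio_list is only non-NULL while the task is inside the block
 * layer's bio submission path, so dm_bufio_in_request() above tells us
 * whether bufio was called from a device-mapper request (map) context.
 */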

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()		do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_trace.nr_entries = 0;
	b->stack_trace.max_entries = MAX_STACK;
	b->stack_trace.entries = b->stack_entries;
	b->stack_trace.skip = 2;
	save_stack_trace(&b->stack_trace);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = (b->block < block) ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	unsigned noio_flag;
	void *ptr;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */

	if (gfp_mask & __GFP_NORETRY)
		noio_flag = memalloc_noio_save();

	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		memalloc_noio_restore(noio_flag);

	return ptr;
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	memset(&b->stack_trace, 0, sizeof(b->stack_trace));
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}

/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_error = error ? -EIO : 0;
	b->bio.bi_end_io(&b->bio);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
}

static void inline_endio(struct bio *bio)
{
	bio_end_io_t *end_fn = bio->bi_private;
	int error = bio->bi_error;

	/*
	 * Reset the bio to free any attached resources
	 * (e.g. bio integrity profiles).
	 */
	bio_reset(bio);

	bio->bi_error = error;
	end_fn(bio);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = inline_endio;
	/*
	 * Use of .bi_private isn't a problem here because
	 * the dm_buffer's inline bio is local to bufio.
	 */
	b->bio.bi_private = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  offset_in_page(ptr))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = bio->bi_error;
	if (unlikely(bio->bi_error)) {
		struct dm_bufio_client *c = b->c;
		int error = bio->bi_error;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

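	/*
	 * clear_bit() does not imply a memory barrier by itself; the explicit
	 * barriers below are presumably there so that the cleared B_WRITING
	 * bit is seen by a waiter woken through wake_up_bit().
	 */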
	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	if (!write_list)
		submit_io(b, WRITE, b->block, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, WRITE, b->block, write_endio);
		dm_bufio_cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		dm_bufio_cond_resched();
	}
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
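
/*
 * As a worked example: with the default DM_BUFIO_WRITEBACK_PERCENT of 75,
 * a client whose share of the cache is 25600 buffers (a hypothetical
 * figure, e.g. 100 MiB of 4 KiB blocks) gets limit_buffers = 25600 and
 * starts background writeback once more than 19200 buffers are dirty.
 */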

/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = bio->bi_error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);
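
/*
 * Unlike dm_bufio_read() below, dm_bufio_get() never sleeps: NF_GET only
 * returns data that is already in the cache, which is why it may be used
 * from the request routine (see the note in __bufio_new).
 */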

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * we wait for all the writes to finish.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier and flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);

	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

/*
 * Free the given buffer.
 *
 * This is just a hint, if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
	       (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			print_stack_trace(&b->stack_trace, 1);
			b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO pending or the client
 * is still using it. Caller is expected to know buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);

	return retain_bytes / c->block_size;
}
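
/*
 * For example, the default DM_BUFIO_DEFAULT_RETAIN_BYTES of 256 KiB with
 * 4 KiB blocks (an illustrative block size) retains at least 64 buffers.
 */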

static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
			    gfp_t gfp_mask)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = nr_to_scan;
	unsigned retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (__try_evict_buffer(b, gfp_mask))
				freed++;
			if (!--nr_to_scan || ((count - freed) <= retain_target))
				return freed;
			dm_bufio_cond_resched();
		}
	}
	return freed;
}

static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long freed;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return SHRINK_STOP;

	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
	dm_bufio_unlock(c);
	return freed;
}

static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long count;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return 0;

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	dm_bufio_unlock(c);
	return count;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
				  __ffs(block_size) - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
				  PAGE_SHIFT - __ffs(block_size) : 0);
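	/*
	 * For example, with 4096-byte blocks on a system with 4 KiB pages
	 * (an illustrative configuration): sectors_per_block_bits is 3 and
	 * both pages_per_block_bits and blocks_per_page_bits are 0.
	 */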

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
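
/*
 * Typical use by a hypothetical device-mapper target (an illustrative
 * sketch only, not code taken from any real target):
 *
 *	struct dm_bufio_client *c;
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	data = dm_bufio_read(c, block, &buf);
 *	if (!IS_ERR(data)) {
 *		... modify data ...
 *		dm_bufio_mark_buffer_dirty(buf);
 *		dm_bufio_release(buf);
 *		dm_bufio_write_dirty_buffers(c);
 *	}
 *	dm_bufio_client_destroy(c);
 */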

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

static unsigned get_max_age_hz(void)
{
	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned retain_target = get_retain_buffers(c);
	unsigned count;

	dm_bufio_lock(c);

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		dm_bufio_cond_resched();
	}

	dm_bufio_unlock(c);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;
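	/*
	 * For example, on a machine with 8 GiB of low memory (a hypothetical
	 * configuration), DM_BUFIO_MEMORY_PERCENT of 2 yields a default cache
	 * size of roughly 160 MiB, unless the vmalloc limit above is smaller.
	 */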

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
		kmem_cache_destroy(dm_bufio_caches[i]);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
			__func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
			__func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");