#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <trace/events/writeback.h>
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
static struct class *bdi_class;
/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * protection.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
        /* Take both list_locks in address order to avoid ABBA deadlock. */
        if (wb1 < wb2) {
                spin_lock(&wb1->list_lock);
                spin_lock_nested(&wb2->list_lock, 1);
        } else {
                spin_lock(&wb2->list_lock);
                spin_lock_nested(&wb1->list_lock, 1);
        }
}
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;
static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_wb_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                nr_more_io++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
                   (unsigned long) K(bdi->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

        return 0;
}
static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}
static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}
static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))
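
/*
 * Illustrative usage (not part of the original file): the store routine
 * above is exposed through sysfs, so for a hypothetical bdi named "8:0"
 * the per-device readahead can be tuned from user space with:
 *
 *      # echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *
 * The kB value is converted to pages by shifting right by
 * (PAGE_SHIFT - 10); with 4 KiB pages, 512 kB >> 2 == 128 pages.
 */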
#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
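
/*
 * For reference (expansion sketch, not in the original source), the
 * BDI_SHOW() invocation above generates roughly:
 *
 *      static ssize_t read_ahead_kb_show(struct device *dev,
 *                      struct device_attribute *attr, char *page)
 *      {
 *              struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *              return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *                              (long long)K(bdi->ra_pages));
 *      }
 */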
static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)
static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
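
/*
 * Illustrative usage (not part of the original file): min_ratio and
 * max_ratio bound a device's share of the global dirty limit, in percent.
 * For a hypothetical bdi "8:16":
 *
 *      # echo 10 > /sys/class/bdi/8:16/min_ratio
 *      # echo 50 > /sys/class/bdi/8:16/max_ratio
 *
 * bdi_set_min_ratio()/bdi_set_max_ratio() return -EINVAL for inconsistent
 * values (e.g. a min above the current max), which the store routines
 * propagate back to the writer.
 */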
static ssize_t cpu_list_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        struct bdi_writeback *wb = &bdi->wb;
        cpumask_var_t newmask;
        ssize_t ret;
        struct task_struct *task;

        if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
                return -ENOMEM;

        ret = cpulist_parse(buf, newmask);
        if (!ret) {
                spin_lock_bh(&bdi->wb_lock);
                task = wb->task;
                if (task)
                        get_task_struct(task);
                spin_unlock_bh(&bdi->wb_lock);

                mutex_lock(&bdi->flusher_cpumask_lock);
                if (task) {
                        ret = set_cpus_allowed_ptr(task, newmask);
                        put_task_struct(task);
                }
                if (ret == 0) {
                        cpumask_copy(bdi->flusher_cpumask, newmask);
                        ret = count;
                }
                mutex_unlock(&bdi->flusher_cpumask_lock);
        }

        free_cpumask_var(newmask);

        return ret;
}
static ssize_t cpu_list_show(struct device *dev,
                struct device_attribute *attr, char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        ssize_t ret;

        mutex_lock(&bdi->flusher_cpumask_lock);
        ret = cpulist_scnprintf(page, PAGE_SIZE-1, bdi->flusher_cpumask);
        mutex_unlock(&bdi->flusher_cpumask_lock);

        return ret;
}
#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_RW(cpu_list),
        __ATTR_NULL,
};
static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);
static int __init default_bdi_init(void)
{
        int err;

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");
        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);
int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}
static void wakeup_timer_fn(unsigned long data)
{
        struct backing_dev_info *bdi = (struct backing_dev_info *)data;

        spin_lock_bh(&bdi->wb_lock);
        if (bdi->wb.task) {
                trace_writeback_wake_thread(bdi);
                wake_up_process(bdi->wb.task);
        } else if (bdi->dev) {
                /*
                 * When bdi tasks are inactive for a long time, they are
                 * killed. In this case we have to wake up the forker thread,
                 * which should create and run the bdi thread.
                 */
                trace_writeback_wake_forker_thread(bdi);
                wake_up_process(default_backing_dev_info.wb.task);
        }
        spin_unlock_bh(&bdi->wb_lock);
}
/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out only
 * starts 'dirty_writeback_interval' centisecs from now anyway, we just set up
 * a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}
/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
        unsigned long interval;

        interval = msecs_to_jiffies(dirty_writeback_interval * 10);
        return max(5UL * 60 * HZ, interval);
}
/*
 * Clear pending bit and wakeup anybody waiting for flusher thread creation or
 * shutdown.
 */
static void bdi_clear_pending(struct backing_dev_info *bdi)
{
        clear_bit(BDI_pending, &bdi->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bdi->state, BDI_pending);
}
static int bdi_forker_thread(void *ptr)
{
        struct bdi_writeback *me = ptr;

        current->flags |= PF_SWAPWRITE;
        set_freezable();

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(current, 0);

        for (;;) {
                struct task_struct *task = NULL;
                struct backing_dev_info *bdi;
                enum {
                        NO_ACTION,   /* Nothing to do */
                        FORK_THREAD, /* Fork bdi thread */
                        KILL_THREAD, /* Kill inactive bdi thread */
                } action = NO_ACTION;

                /*
                 * Temporary measure, we want to make sure we don't see
                 * dirty data on the default backing_dev_info
                 */
                if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
                        del_timer(&me->wakeup_timer);
                        wb_do_writeback(me, 0);
                }

                spin_lock_bh(&bdi_lock);
                /*
                 * In the following loop we are going to check whether we have
                 * some work to do without any synchronization with tasks
                 * waking us up to do work for them. Set the task state here
                 * so that we don't miss wakeups after verifying conditions.
                 */
                set_current_state(TASK_INTERRUPTIBLE);

                list_for_each_entry(bdi, &bdi_list, bdi_list) {
                        bool have_dirty_io;

                        if (!bdi_cap_writeback_dirty(bdi) ||
                             bdi_cap_flush_forker(bdi))
                                continue;

                        WARN(!test_bit(BDI_registered, &bdi->state),
                             "bdi %p/%s is not registered!\n", bdi, bdi->name);

                        have_dirty_io = !list_empty(&bdi->work_list) ||
                                        wb_has_dirty_io(&bdi->wb);

                        /*
                         * If the bdi has work to do, but the thread does not
                         * exist - create it.
                         */
                        if (!bdi->wb.task && have_dirty_io) {
                                /*
                                 * Set the pending bit - if someone will try to
                                 * unregister this bdi - it'll wait on this bit.
                                 */
                                set_bit(BDI_pending, &bdi->state);
                                action = FORK_THREAD;
                                break;
                        }

                        spin_lock(&bdi->wb_lock);

                        /*
                         * If there is no work to do and the bdi thread was
                         * inactive long enough - kill it. The wb_lock is taken
                         * to make sure no-one adds more work to this bdi and
                         * wakes the bdi thread up.
                         */
                        if (bdi->wb.task && !have_dirty_io &&
                            time_after(jiffies, bdi->wb.last_active +
                                                bdi_longest_inactive())) {
                                task = bdi->wb.task;
                                bdi->wb.task = NULL;
                                spin_unlock(&bdi->wb_lock);
                                set_bit(BDI_pending, &bdi->state);
                                action = KILL_THREAD;
                                break;
                        }
                        spin_unlock(&bdi->wb_lock);
                }
                spin_unlock_bh(&bdi_lock);

                /* Keep working if default bdi still has things to do */
                if (!list_empty(&me->bdi->work_list))
                        __set_current_state(TASK_RUNNING);

                switch (action) {
                case FORK_THREAD:
                        __set_current_state(TASK_RUNNING);
                        task = kthread_create(bdi_writeback_thread, &bdi->wb,
                                              "flush-%s", dev_name(bdi->dev));
                        if (IS_ERR(task)) {
                                /*
                                 * If thread creation fails, force writeout of
                                 * the bdi from the thread. Hopefully 1024 is
                                 * large enough for efficient IO.
                                 */
                                writeback_inodes_wb(&bdi->wb, 1024,
                                                    WB_REASON_FORKER_THREAD);
                        } else {
                                int ret;

                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
                                 * And as soon as the bdi thread is visible, we
                                 * can start it.
                                 */
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
                                mutex_lock(&bdi->flusher_cpumask_lock);
                                ret = set_cpus_allowed_ptr(task,
                                                        bdi->flusher_cpumask);
                                mutex_unlock(&bdi->flusher_cpumask_lock);
                                if (ret)
                                        printk_once("%s: failed to bind flusher"
                                                    " thread %s, error %d\n",
                                                    __func__, task->comm, ret);
                                wake_up_process(task);
                        }
                        bdi_clear_pending(bdi);
                        break;

                case KILL_THREAD:
                        __set_current_state(TASK_RUNNING);
                        kthread_stop(task);
                        bdi_clear_pending(bdi);
                        break;

                case NO_ACTION:
                        if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
                                /*
                                 * There is no dirty data. The only thing we
                                 * should now care about is checking for
                                 * inactive bdi threads and killing them. Thus,
                                 * let's sleep for a longer time, save energy
                                 * and be friendly to battery-driven devices.
                                 */
                                schedule_timeout(bdi_longest_inactive());
                        else
                                schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
                        try_to_freeze();
                        break;
                }
        }

        return 0;
}
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        bdi->dev = dev;

        /*
         * Just start the forker thread for our default backing_dev_info,
         * and add other bdi's to the list. They will get a thread created
         * on-demand when they need it.
         */
        if (bdi_cap_flush_forker(bdi)) {
                struct bdi_writeback *wb = &bdi->wb;

                wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
                                                dev_name(dev));
                if (IS_ERR(wb->task))
                        return PTR_ERR(wb->task);
        } else {
                int node;

                /*
                 * Set up a default cpumask for the flusher threads that
                 * includes all cpus on the same numa node as the device.
                 * The mask may be overridden via sysfs.
                 */
                node = dev_to_node(bdi->dev);
                if (node != NUMA_NO_NODE)
                        cpumask_copy(bdi->flusher_cpumask,
                                     cpumask_of_node(node));
        }

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(BDI_registered, &bdi->state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
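
/*
 * Illustrative usage (not part of the original file): a block device
 * registers its bdi under its major:minor pair, so dev_t MKDEV(8, 0) shows
 * up as /sys/class/bdi/8:0. A driver holding a request queue 'q' and a
 * gendisk 'disk' (hypothetical variables) would call:
 *
 *      err = bdi_register_dev(&q->backing_dev_info, disk_devt(disk));
 */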
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        struct task_struct *task;

        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        bdi_remove_from_list(bdi);

        /*
         * If setup is pending, wait for that to complete first
         */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                        TASK_UNINTERRUPTIBLE);

        /*
         * Finally, kill the kernel thread. We don't need to be RCU
         * safe anymore, since the bdi is gone from visibility.
         */
        spin_lock_bh(&bdi->wb_lock);
        task = bdi->wb.task;
        bdi->wb.task = NULL;
        spin_unlock_bh(&bdi->wb_lock);

        if (task)
                kthread_stop(task);
}
/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (sb->s_bdi == bdi)
                        sb->s_bdi = &default_backing_dev_info;
        }
        spin_unlock(&sb_lock);
}
void bdi_unregister(struct backing_dev_info *bdi)
{
        struct device *dev = bdi->dev;

        if (dev) {
                bdi_set_min_ratio(bdi, 0);
                trace_writeback_bdi_unregister(bdi);
                bdi_prune_sb(bdi);
                del_timer_sync(&bdi->wb.wakeup_timer);

                if (!bdi_cap_flush_forker(bdi))
                        bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);

                spin_lock_bh(&bdi->wb_lock);
                bdi->dev = NULL;
                spin_unlock_bh(&bdi->wb_lock);

                device_unregister(dev);
        }
}
EXPORT_SYMBOL(bdi_unregister);
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        spin_lock_init(&wb->list_lock);
        setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}
/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))
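
/*
 * Worked example (illustrative): INIT_BW is in pages per second. With
 * PAGE_SHIFT == 12 (4 KiB pages), 100 << (20 - 12) == 25600 pages/s,
 * and 25600 * 4 KiB == 100 MiB/s.
 */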
int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        if (!bdi_cap_flush_forker(bdi)) {
                bdi->flusher_cpumask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
                if (!bdi->flusher_cpumask)
                        return -ENOMEM;
                cpumask_setall(bdi->flusher_cpumask);
                mutex_init(&bdi->flusher_cpumask_lock);
        } else
                bdi->flusher_cpumask = NULL;

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;

        bdi->bw_time_stamp = jiffies;
        bdi->written_stamp = 0;

        bdi->balanced_dirty_ratelimit = INIT_BW;
        bdi->dirty_ratelimit = INIT_BW;
        bdi->write_bandwidth = INIT_BW;
        bdi->avg_write_bandwidth = INIT_BW;

        err = fprop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
                kfree(bdi->flusher_cpumask);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);
void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        /*
         * Splice our entries to the default_backing_dev_info, if this
         * bdi disappears
         */
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;

                bdi_lock_two(&bdi->wb, dst);
                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                list_splice(&bdi->wb.b_io, &dst->b_io);
                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
                spin_unlock(&bdi->wb.list_lock);
                spin_unlock(&dst->list_lock);
        }

        bdi_unregister(bdi);

        kfree(bdi->flusher_cpumask);

        /*
         * If bdi_unregister() had already been called earlier, the
         * wakeup_timer could still be armed because bdi_prune_sb()
         * can race with the bdi_wakeup_thread_delayed() calls from
         * __mark_inode_dirty().
         */
        del_timer_sync(&bdi->wb.wakeup_timer);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
                           unsigned int cap)
{
        char tmp[32];
        int err;

        bdi->name = name;
        bdi->capabilities = cap;
        err = bdi_init(bdi);
        if (err)
                return err;

        sprintf(tmp, "%.28s%s", name, "-%d");
        err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
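
/*
 * Illustrative usage (not part of the original file): a filesystem with a
 * hypothetical per-sb info struct 'sbi' would typically do, at mount time:
 *
 *      err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *      if (err)
 *              return err;
 *      sb->s_bdi = &sbi->bdi;
 *
 * and call bdi_destroy(&sbi->bdi) on unmount. The "-%d" suffix above makes
 * each registered name unique, e.g. "myfs-1", "myfs-2", ...
 */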
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_bdi_congested[2];
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (test_and_clear_bit(bit, &bdi->state))
                atomic_dec(&nr_bdi_congested[sync]);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);
void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (!test_and_set_bit(bit, &bdi->state))
                atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
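
/*
 * Illustrative usage (not part of the original file): reclaim-style
 * callers typically back off like this when writeback is congested:
 *
 *      if (should_throttle)
 *              congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. sleep for up to HZ/10 jiffies (100 ms) unless a congested bdi
 * clears congestion or a write completes first. 'should_throttle' is a
 * hypothetical condition.
 */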
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @zone has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of zone congestion, this function calls cond_resched() to
 * yield the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current zone, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
                        !zone_is_reclaim_congested(zone)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
/*
 * /proc/sys/vm/nr_pdflush_threads is obsolete: pdflush is gone, so always
 * report zero and warn once when the knob is read.
 */
int pdflush_proc_obsolete(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char kbuf[] = "0\n";

        if (*ppos) {
                *lenp = 0;
                return 0;
        }

        if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
                return -EFAULT;
        printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
                    table->procname);

        *lenp = 2;
        *ppos += *lenp;
        return 2;
}