mm/backing-dev.c
1
2 #include <linux/wait.h>
3 #include <linux/backing-dev.h>
4 #include <linux/kthread.h>
5 #include <linux/freezer.h>
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11 #include <linux/writeback.h>
12 #include <linux/device.h>
13 #include <linux/slab.h>
14 #include <trace/events/writeback.h>
15
16 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
17
18 struct backing_dev_info default_backing_dev_info = {
19 .name = "default",
20 .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
21 .state = 0,
22 .capabilities = BDI_CAP_MAP_COPY,
23 };
24 EXPORT_SYMBOL_GPL(default_backing_dev_info);
25
26 struct backing_dev_info noop_backing_dev_info = {
27 .name = "noop",
28 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
29 };
30 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
31
32 static struct class *bdi_class;
33
34 /*
35  * bdi_lock protects updates to bdi_list and bdi_pending_list, and also
36  * provides reader-side protection for bdi_pending_list. bdi_list itself
37  * relies on RCU for reader-side locking.
38 */
39 DEFINE_SPINLOCK(bdi_lock);
40 LIST_HEAD(bdi_list);
41 LIST_HEAD(bdi_pending_list);
42
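/*
 * bdi_lock_two() takes the list_lock at the lower address first, so every
 * caller uses the same lock ordering and two tasks locking the same pair of
 * bdi_writebacks cannot deadlock.  spin_lock_nested() tells lockdep that the
 * second acquisition of the same lock class is intentional.
 */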
43 void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
44 {
45 if (wb1 < wb2) {
46 spin_lock(&wb1->list_lock);
47 spin_lock_nested(&wb2->list_lock, 1);
48 } else {
49 spin_lock(&wb2->list_lock);
50 spin_lock_nested(&wb1->list_lock, 1);
51 }
52 }
53
54 #ifdef CONFIG_DEBUG_FS
55 #include <linux/debugfs.h>
56 #include <linux/seq_file.h>
57
58 static struct dentry *bdi_debug_root;
59
60 static void bdi_debug_init(void)
61 {
62 bdi_debug_root = debugfs_create_dir("bdi", NULL);
63 }
64
65 static int bdi_debug_stats_show(struct seq_file *m, void *v)
66 {
67 struct backing_dev_info *bdi = m->private;
68 struct bdi_writeback *wb = &bdi->wb;
69 unsigned long background_thresh;
70 unsigned long dirty_thresh;
71 unsigned long bdi_thresh;
72 unsigned long nr_dirty, nr_io, nr_more_io;
73 struct inode *inode;
74
75 nr_dirty = nr_io = nr_more_io = 0;
76 spin_lock(&wb->list_lock);
77 list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
78 nr_dirty++;
79 list_for_each_entry(inode, &wb->b_io, i_wb_list)
80 nr_io++;
81 list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
82 nr_more_io++;
83 spin_unlock(&wb->list_lock);
84
85 global_dirty_limits(&background_thresh, &dirty_thresh);
86 bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
87
88 #define K(x) ((x) << (PAGE_SHIFT - 10))
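/* K() converts a page count to kilobytes: x << (PAGE_SHIFT - 10) == x * PAGE_SIZE / 1024 */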
89 seq_printf(m,
90 "BdiWriteback: %10lu kB\n"
91 "BdiReclaimable: %10lu kB\n"
92 "BdiDirtyThresh: %10lu kB\n"
93 "DirtyThresh: %10lu kB\n"
94 "BackgroundThresh: %10lu kB\n"
95 "BdiDirtied: %10lu kB\n"
96 "BdiWritten: %10lu kB\n"
97 "BdiWriteBandwidth: %10lu kBps\n"
98 "b_dirty: %10lu\n"
99 "b_io: %10lu\n"
100 "b_more_io: %10lu\n"
101 "bdi_list: %10u\n"
102 "state: %10lx\n",
103 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
104 (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
105 K(bdi_thresh),
106 K(dirty_thresh),
107 K(background_thresh),
108 (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
109 (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
110 (unsigned long) K(bdi->write_bandwidth),
111 nr_dirty,
112 nr_io,
113 nr_more_io,
114 !list_empty(&bdi->bdi_list), bdi->state);
115 #undef K
116
117 return 0;
118 }
119
120 static int bdi_debug_stats_open(struct inode *inode, struct file *file)
121 {
122 return single_open(file, bdi_debug_stats_show, inode->i_private);
123 }
124
125 static const struct file_operations bdi_debug_stats_fops = {
126 .open = bdi_debug_stats_open,
127 .read = seq_read,
128 .llseek = seq_lseek,
129 .release = single_release,
130 };
131
132 static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
133 {
134 bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
135 bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
136 bdi, &bdi_debug_stats_fops);
137 }
138
139 static void bdi_debug_unregister(struct backing_dev_info *bdi)
140 {
141 debugfs_remove(bdi->debug_stats);
142 debugfs_remove(bdi->debug_dir);
143 }
144 #else
145 static inline void bdi_debug_init(void)
146 {
147 }
148 static inline void bdi_debug_register(struct backing_dev_info *bdi,
149 const char *name)
150 {
151 }
152 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
153 {
154 }
155 #endif
156
157 static ssize_t read_ahead_kb_store(struct device *dev,
158 struct device_attribute *attr,
159 const char *buf, size_t count)
160 {
161 struct backing_dev_info *bdi = dev_get_drvdata(dev);
162 unsigned long read_ahead_kb;
163 ssize_t ret;
164
165 ret = kstrtoul(buf, 10, &read_ahead_kb);
166 if (ret < 0)
167 return ret;
168
169 bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
170
171 return count;
172 }
173
174 #define K(pages) ((pages) << (PAGE_SHIFT - 10))
175
176 #define BDI_SHOW(name, expr) \
177 static ssize_t name##_show(struct device *dev, \
178 struct device_attribute *attr, char *page) \
179 { \
180 struct backing_dev_info *bdi = dev_get_drvdata(dev); \
181 \
182 return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr); \
183 }
184
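/*
 * Generates read_ahead_kb_show(), which reports bdi->ra_pages converted to
 * kilobytes; together with read_ahead_kb_store() above it backs the
 * __ATTR_RW(read_ahead_kb) entry in bdi_dev_attrs[] below.
 */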
185 BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
186
187 static ssize_t min_ratio_store(struct device *dev,
188 struct device_attribute *attr, const char *buf, size_t count)
189 {
190 struct backing_dev_info *bdi = dev_get_drvdata(dev);
191 unsigned int ratio;
192 ssize_t ret;
193
194 ret = kstrtouint(buf, 10, &ratio);
195 if (ret < 0)
196 return ret;
197
198 ret = bdi_set_min_ratio(bdi, ratio);
199 if (!ret)
200 ret = count;
201
202 return ret;
203 }
204 BDI_SHOW(min_ratio, bdi->min_ratio)
205
206 static ssize_t max_ratio_store(struct device *dev,
207 struct device_attribute *attr, const char *buf, size_t count)
208 {
209 struct backing_dev_info *bdi = dev_get_drvdata(dev);
210 unsigned int ratio;
211 ssize_t ret;
212
213 ret = kstrtouint(buf, 10, &ratio);
214 if (ret < 0)
215 return ret;
216
217 ret = bdi_set_max_ratio(bdi, ratio);
218 if (!ret)
219 ret = count;
220
221 return ret;
222 }
223 BDI_SHOW(max_ratio, bdi->max_ratio)
224
225 static ssize_t cpu_list_store(struct device *dev,
226 struct device_attribute *attr, const char *buf, size_t count)
227 {
228 struct backing_dev_info *bdi = dev_get_drvdata(dev);
229 struct bdi_writeback *wb = &bdi->wb;
230 cpumask_var_t newmask;
231 ssize_t ret;
232 struct task_struct *task;
233
234 if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
235 return -ENOMEM;
236
237 ret = cpulist_parse(buf, newmask);
238 if (!ret) {
239 spin_lock_bh(&bdi->wb_lock);
240 task = wb->task;
241 if (task)
242 get_task_struct(task);
243 spin_unlock_bh(&bdi->wb_lock);
244
245 mutex_lock(&bdi->flusher_cpumask_lock);
246 if (task) {
247 ret = set_cpus_allowed_ptr(task, newmask);
248 put_task_struct(task);
249 }
250 if (ret == 0) {
251 cpumask_copy(bdi->flusher_cpumask, newmask);
252 ret = count;
253 }
254 mutex_unlock(&bdi->flusher_cpumask_lock);
255
256 }
257 free_cpumask_var(newmask);
258
259 return ret;
260 }
261
262 static ssize_t cpu_list_show(struct device *dev,
263 struct device_attribute *attr, char *page)
264 {
265 struct backing_dev_info *bdi = dev_get_drvdata(dev);
266 ssize_t ret;
267
268 mutex_lock(&bdi->flusher_cpumask_lock);
269 ret = cpulist_scnprintf(page, PAGE_SIZE-1, bdi->flusher_cpumask);
270 mutex_unlock(&bdi->flusher_cpumask_lock);
271
272 return ret;
273 }
274
275 #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
276
277 static struct device_attribute bdi_dev_attrs[] = {
278 __ATTR_RW(read_ahead_kb),
279 __ATTR_RW(min_ratio),
280 __ATTR_RW(max_ratio),
281 __ATTR_RW(cpu_list),
282 __ATTR_NULL,
283 };
284
285 static __init int bdi_class_init(void)
286 {
287 bdi_class = class_create(THIS_MODULE, "bdi");
288 if (IS_ERR(bdi_class))
289 return PTR_ERR(bdi_class);
290
291 bdi_class->dev_attrs = bdi_dev_attrs;
292 bdi_debug_init();
293 return 0;
294 }
295 postcore_initcall(bdi_class_init);
296
297 static int __init default_bdi_init(void)
298 {
299 int err;
300
301 err = bdi_init(&default_backing_dev_info);
302 if (!err)
303 bdi_register(&default_backing_dev_info, NULL, "default");
304 err = bdi_init(&noop_backing_dev_info);
305
306 return err;
307 }
308 subsys_initcall(default_bdi_init);
309
310 int bdi_has_dirty_io(struct backing_dev_info *bdi)
311 {
312 return wb_has_dirty_io(&bdi->wb);
313 }
314
315 static void wakeup_timer_fn(unsigned long data)
316 {
317 struct backing_dev_info *bdi = (struct backing_dev_info *)data;
318
319 spin_lock_bh(&bdi->wb_lock);
320 if (bdi->wb.task) {
321 trace_writeback_wake_thread(bdi);
322 wake_up_process(bdi->wb.task);
323 } else if (bdi->dev) {
324 /*
325  * When bdi threads are inactive for a long time, they are killed.
326  * In this case we have to wake up the forker thread, which
327  * should create and run the bdi thread.
328 */
329 trace_writeback_wake_forker_thread(bdi);
330 wake_up_process(default_backing_dev_info.wb.task);
331 }
332 spin_unlock_bh(&bdi->wb_lock);
333 }
334
335 /*
336 * This function is used when the first inode for this bdi is marked dirty. It
337  * wakes up the corresponding bdi thread, which should then take care of the
338  * periodic background write-out of dirty inodes. Since the write-out would
339  * start only 'dirty_writeback_interval' centisecs from now anyway, we just
340  * set up a timer which wakes the bdi thread up later.
341  *
342  * Note, we wouldn't bother setting up the timer, but this function is on the
343  * fast path (used by '__mark_inode_dirty()'), so we save a few context switches
344 * by delaying the wake-up.
345 */
346 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
347 {
348 unsigned long timeout;
349
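/* dirty_writeback_interval is in centiseconds, hence the '* 10' to get milliseconds */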
350 timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
351 mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
352 }
353
354 /*
355  * Calculate the longest interval (in jiffies) that bdi threads are allowed
356  * to be inactive.
357 */
358 static unsigned long bdi_longest_inactive(void)
359 {
360 unsigned long interval;
361
362 interval = msecs_to_jiffies(dirty_writeback_interval * 10);
363 return max(5UL * 60 * HZ, interval);
364 }
365
366 /*
367  * Clear the pending bit and wake up anybody waiting for flusher thread
368  * creation or shutdown.
369 */
370 static void bdi_clear_pending(struct backing_dev_info *bdi)
371 {
372 clear_bit(BDI_pending, &bdi->state);
373 smp_mb__after_clear_bit();
374 wake_up_bit(&bdi->state, BDI_pending);
375 }
376
377 static int bdi_forker_thread(void *ptr)
378 {
379 struct bdi_writeback *me = ptr;
380
381 current->flags |= PF_SWAPWRITE;
382 set_freezable();
383
384 /*
385 * Our parent may run at a different priority, just set us to normal
386 */
387 set_user_nice(current, 0);
388
389 for (;;) {
390 struct task_struct *task = NULL;
391 struct backing_dev_info *bdi;
392 enum {
393 NO_ACTION, /* Nothing to do */
394 FORK_THREAD, /* Fork bdi thread */
395 KILL_THREAD, /* Kill inactive bdi thread */
396 } action = NO_ACTION;
397
398 /*
399 * Temporary measure, we want to make sure we don't see
400 * dirty data on the default backing_dev_info
401 */
402 if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
403 del_timer(&me->wakeup_timer);
404 wb_do_writeback(me, 0);
405 }
406
407 spin_lock_bh(&bdi_lock);
408 /*
409 * In the following loop we are going to check whether we have
410 * some work to do without any synchronization with tasks
411 * waking us up to do work for them. Set the task state here
412 * so that we don't miss wakeups after verifying conditions.
413 */
414 set_current_state(TASK_INTERRUPTIBLE);
415
416 list_for_each_entry(bdi, &bdi_list, bdi_list) {
417 bool have_dirty_io;
418
419 if (!bdi_cap_writeback_dirty(bdi) ||
420 bdi_cap_flush_forker(bdi))
421 continue;
422
423 WARN(!test_bit(BDI_registered, &bdi->state),
424 "bdi %p/%s is not registered!\n", bdi, bdi->name);
425
426 have_dirty_io = !list_empty(&bdi->work_list) ||
427 wb_has_dirty_io(&bdi->wb);
428
429 /*
430 * If the bdi has work to do, but the thread does not
431 * exist - create it.
432 */
433 if (!bdi->wb.task && have_dirty_io) {
434 /*
435  * Set the pending bit - if someone tries to
436  * unregister this bdi, it'll wait on this bit.
437 */
438 set_bit(BDI_pending, &bdi->state);
439 action = FORK_THREAD;
440 break;
441 }
442
443 spin_lock(&bdi->wb_lock);
444
445 /*
446 * If there is no work to do and the bdi thread was
447 * inactive long enough - kill it. The wb_lock is taken
448 * to make sure no-one adds more work to this bdi and
449 * wakes the bdi thread up.
450 */
451 if (bdi->wb.task && !have_dirty_io &&
452 time_after(jiffies, bdi->wb.last_active +
453 bdi_longest_inactive())) {
454 task = bdi->wb.task;
455 bdi->wb.task = NULL;
456 spin_unlock(&bdi->wb_lock);
457 set_bit(BDI_pending, &bdi->state);
458 action = KILL_THREAD;
459 break;
460 }
461 spin_unlock(&bdi->wb_lock);
462 }
463 spin_unlock_bh(&bdi_lock);
464
465 /* Keep working if default bdi still has things to do */
466 if (!list_empty(&me->bdi->work_list))
467 __set_current_state(TASK_RUNNING);
468
469 switch (action) {
470 case FORK_THREAD:
471 __set_current_state(TASK_RUNNING);
472 task = kthread_create(bdi_writeback_thread, &bdi->wb,
473 "flush-%s", dev_name(bdi->dev));
474 if (IS_ERR(task)) {
475 /*
476  * If thread creation fails, force writeout of
477  * the bdi from the forker thread. Hopefully 1024
478  * pages is large enough for efficient IO.
479 */
480 writeback_inodes_wb(&bdi->wb, 1024,
481 WB_REASON_FORKER_THREAD);
482 } else {
483 int ret;
484 /*
485 * The spinlock makes sure we do not lose
486 * wake-ups when racing with 'bdi_queue_work()'.
487 * And as soon as the bdi thread is visible, we
488 * can start it.
489 */
490 spin_lock_bh(&bdi->wb_lock);
491 bdi->wb.task = task;
492 spin_unlock_bh(&bdi->wb_lock);
493 mutex_lock(&bdi->flusher_cpumask_lock);
494 ret = set_cpus_allowed_ptr(task,
495 bdi->flusher_cpumask);
496 mutex_unlock(&bdi->flusher_cpumask_lock);
497 if (ret)
498 printk_once("%s: failed to bind flusher"
499 " thread %s, error %d\n",
500 __func__, task->comm, ret);
501 wake_up_process(task);
502 }
503 bdi_clear_pending(bdi);
504 break;
505
506 case KILL_THREAD:
507 __set_current_state(TASK_RUNNING);
508 kthread_stop(task);
509 bdi_clear_pending(bdi);
510 break;
511
512 case NO_ACTION:
513 if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
514 /*
515  * There is no dirty data. The only thing we
516  * should now care about is checking for
517  * inactive bdi threads and killing them. Thus,
518  * let's sleep for a longer time, save energy and
519  * be friendly to battery-powered devices.
520 */
521 schedule_timeout(bdi_longest_inactive());
522 else
523 schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
524 try_to_freeze();
525 break;
526 }
527 }
528
529 return 0;
530 }
531
532 /*
533 * Remove bdi from bdi_list, and ensure that it is no longer visible
534 */
535 static void bdi_remove_from_list(struct backing_dev_info *bdi)
536 {
537 spin_lock_bh(&bdi_lock);
538 list_del_rcu(&bdi->bdi_list);
539 spin_unlock_bh(&bdi_lock);
540
541 synchronize_rcu_expedited();
542 }
543
544 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
545 const char *fmt, ...)
546 {
547 va_list args;
548 struct device *dev;
549
550 if (bdi->dev) /* The driver needs to use separate queues per device */
551 return 0;
552
553 va_start(args, fmt);
554 dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
555 va_end(args);
556 if (IS_ERR(dev))
557 return PTR_ERR(dev);
558
559 bdi->dev = dev;
560
561 /*
562  * Just start the forker thread for our default backing_dev_info,
563  * and add other bdis to the list. They will get a thread created
564  * on demand when they need it.
565 */
566 if (bdi_cap_flush_forker(bdi)) {
567 struct bdi_writeback *wb = &bdi->wb;
568
569 wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
570 dev_name(dev));
571 if (IS_ERR(wb->task))
572 return PTR_ERR(wb->task);
573 } else {
574 int node;
575 /*
576 * Set up a default cpumask for the flusher threads that
577 * includes all cpus on the same numa node as the device.
578 * The mask may be overridden via sysfs.
579 */
580 node = dev_to_node(bdi->dev);
581 if (node != NUMA_NO_NODE)
582 cpumask_copy(bdi->flusher_cpumask,
583 cpumask_of_node(node));
584 }
585
586 bdi_debug_register(bdi, dev_name(dev));
587 set_bit(BDI_registered, &bdi->state);
588
589 spin_lock_bh(&bdi_lock);
590 list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
591 spin_unlock_bh(&bdi_lock);
592
593 trace_writeback_bdi_register(bdi);
594 return 0;
595 }
596 EXPORT_SYMBOL(bdi_register);
597
598 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
599 {
600 return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
601 }
602 EXPORT_SYMBOL(bdi_register_dev);
603
604 /*
605  * Remove bdi from the global list and shut down any threads we have running
606 */
607 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
608 {
609 struct task_struct *task;
610
611 if (!bdi_cap_writeback_dirty(bdi))
612 return;
613
614 /*
615 * Make sure nobody finds us on the bdi_list anymore
616 */
617 bdi_remove_from_list(bdi);
618
619 /*
620 * If setup is pending, wait for that to complete first
621 */
622 wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
623 TASK_UNINTERRUPTIBLE);
624
625 /*
626 * Finally, kill the kernel thread. We don't need to be RCU
627 * safe anymore, since the bdi is gone from visibility.
628 */
629 spin_lock_bh(&bdi->wb_lock);
630 task = bdi->wb.task;
631 bdi->wb.task = NULL;
632 spin_unlock_bh(&bdi->wb_lock);
633
634 if (task)
635 kthread_stop(task);
636 }
637
638 /*
639 * This bdi is going away now, make sure that no super_blocks point to it
640 */
641 static void bdi_prune_sb(struct backing_dev_info *bdi)
642 {
643 struct super_block *sb;
644
645 spin_lock(&sb_lock);
646 list_for_each_entry(sb, &super_blocks, s_list) {
647 if (sb->s_bdi == bdi)
648 sb->s_bdi = &default_backing_dev_info;
649 }
650 spin_unlock(&sb_lock);
651 }
652
653 void bdi_unregister(struct backing_dev_info *bdi)
654 {
655 struct device *dev = bdi->dev;
656
657 if (dev) {
658 bdi_set_min_ratio(bdi, 0);
659 trace_writeback_bdi_unregister(bdi);
660 bdi_prune_sb(bdi);
661 del_timer_sync(&bdi->wb.wakeup_timer);
662
663 if (!bdi_cap_flush_forker(bdi))
664 bdi_wb_shutdown(bdi);
665 bdi_debug_unregister(bdi);
666
667 spin_lock_bh(&bdi->wb_lock);
668 bdi->dev = NULL;
669 spin_unlock_bh(&bdi->wb_lock);
670
671 device_unregister(dev);
672 }
673 }
674 EXPORT_SYMBOL(bdi_unregister);
675
676 static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
677 {
678 memset(wb, 0, sizeof(*wb));
679
680 wb->bdi = bdi;
681 wb->last_old_flush = jiffies;
682 INIT_LIST_HEAD(&wb->b_dirty);
683 INIT_LIST_HEAD(&wb->b_io);
684 INIT_LIST_HEAD(&wb->b_more_io);
685 spin_lock_init(&wb->list_lock);
686 setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
687 }
688
689 /*
690 * Initial write bandwidth: 100 MB/s
691 */
692 #define INIT_BW (100 << (20 - PAGE_SHIFT))
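/* (100 << 20) bytes/s expressed in pages/s: 25600 pages/s with 4K pages */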
693
694 int bdi_init(struct backing_dev_info *bdi)
695 {
696 int i, err;
697
698 bdi->dev = NULL;
699
700 bdi->min_ratio = 0;
701 bdi->max_ratio = 100;
702 bdi->max_prop_frac = FPROP_FRAC_BASE;
703 spin_lock_init(&bdi->wb_lock);
704 INIT_LIST_HEAD(&bdi->bdi_list);
705 INIT_LIST_HEAD(&bdi->work_list);
706
707 bdi_wb_init(&bdi->wb, bdi);
708
709 if (!bdi_cap_flush_forker(bdi)) {
710 bdi->flusher_cpumask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
711 if (!bdi->flusher_cpumask)
712 return -ENOMEM;
713 cpumask_setall(bdi->flusher_cpumask);
714 mutex_init(&bdi->flusher_cpumask_lock);
715 } else
716 bdi->flusher_cpumask = NULL;
717
718 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
719 err = percpu_counter_init(&bdi->bdi_stat[i], 0);
720 if (err)
721 goto err;
722 }
723
724 bdi->dirty_exceeded = 0;
725
726 bdi->bw_time_stamp = jiffies;
727 bdi->written_stamp = 0;
728
729 bdi->balanced_dirty_ratelimit = INIT_BW;
730 bdi->dirty_ratelimit = INIT_BW;
731 bdi->write_bandwidth = INIT_BW;
732 bdi->avg_write_bandwidth = INIT_BW;
733
734 err = fprop_local_init_percpu(&bdi->completions);
735
736 if (err) {
737 err:
738 while (i--)
739 percpu_counter_destroy(&bdi->bdi_stat[i]);
740 kfree(bdi->flusher_cpumask);
741 }
742
743 return err;
744 }
745 EXPORT_SYMBOL(bdi_init);
746
747 void bdi_destroy(struct backing_dev_info *bdi)
748 {
749 int i;
750
751 /*
752  * Splice our entries over to the default_backing_dev_info, since this
753  * bdi is disappearing.
754 */
755 if (bdi_has_dirty_io(bdi)) {
756 struct bdi_writeback *dst = &default_backing_dev_info.wb;
757
758 bdi_lock_two(&bdi->wb, dst);
759 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
760 list_splice(&bdi->wb.b_io, &dst->b_io);
761 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
762 spin_unlock(&bdi->wb.list_lock);
763 spin_unlock(&dst->list_lock);
764 }
765
766 bdi_unregister(bdi);
767
768 kfree(bdi->flusher_cpumask);
769
770 /*
771 * If bdi_unregister() had already been called earlier, the
772 * wakeup_timer could still be armed because bdi_prune_sb()
773 * can race with the bdi_wakeup_thread_delayed() calls from
774 * __mark_inode_dirty().
775 */
776 del_timer_sync(&bdi->wb.wakeup_timer);
777
778 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
779 percpu_counter_destroy(&bdi->bdi_stat[i]);
780
781 fprop_local_destroy_percpu(&bdi->completions);
782 }
783 EXPORT_SYMBOL(bdi_destroy);
784
785 /*
786 * For use from filesystems to quickly init and register a bdi associated
787 * with dirty writeback
788 */
789 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
790 unsigned int cap)
791 {
792 char tmp[32];
793 int err;
794
795 bdi->name = name;
796 bdi->capabilities = cap;
797 err = bdi_init(bdi);
798 if (err)
799 return err;
800
801 sprintf(tmp, "%.28s%s", name, "-%d");
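/* tmp is now "<name>-%d" (name truncated to 28 chars); bdi_register() fills in the %d from bdi_seq */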
802 err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
803 if (err) {
804 bdi_destroy(bdi);
805 return err;
806 }
807
808 return 0;
809 }
810 EXPORT_SYMBOL(bdi_setup_and_register);
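/*
 * Sketch of typical filesystem usage (the "sbi" field names and the
 * capability flag below are illustrative, not mandated by this API):
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "examplefs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * bdi_destroy() undoes the setup when the filesystem is torn down.
 */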
811
812 static wait_queue_head_t congestion_wqh[2] = {
813 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
814 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
815 };
816 static atomic_t nr_bdi_congested[2];
817
818 void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
819 {
820 enum bdi_state bit;
821 wait_queue_head_t *wqh = &congestion_wqh[sync];
822
823 bit = sync ? BDI_sync_congested : BDI_async_congested;
824 if (test_and_clear_bit(bit, &bdi->state))
825 atomic_dec(&nr_bdi_congested[sync]);
826 smp_mb__after_clear_bit();
827 if (waitqueue_active(wqh))
828 wake_up(wqh);
829 }
830 EXPORT_SYMBOL(clear_bdi_congested);
831
832 void set_bdi_congested(struct backing_dev_info *bdi, int sync)
833 {
834 enum bdi_state bit;
835
836 bit = sync ? BDI_sync_congested : BDI_async_congested;
837 if (!test_and_set_bit(bit, &bdi->state))
838 atomic_inc(&nr_bdi_congested[sync]);
839 }
840 EXPORT_SYMBOL(set_bdi_congested);
841
842 /**
843 * congestion_wait - wait for a backing_dev to become uncongested
844 * @sync: SYNC or ASYNC IO
845 * @timeout: timeout in jiffies
846 *
847 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
848 * write congestion. If no backing_devs are congested then just wait for the
849 * next write to be completed.
850 */
851 long congestion_wait(int sync, long timeout)
852 {
853 long ret;
854 unsigned long start = jiffies;
855 DEFINE_WAIT(wait);
856 wait_queue_head_t *wqh = &congestion_wqh[sync];
857
858 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
859 ret = io_schedule_timeout(timeout);
860 finish_wait(wqh, &wait);
861
862 trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
863 jiffies_to_usecs(jiffies - start));
864
865 return ret;
866 }
867 EXPORT_SYMBOL(congestion_wait);
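/*
 * Illustrative caller pattern (not part of this file): a reclaim or
 * writeback path that keeps hitting congested devices may back off with
 * something like
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/50);
 *
 * before retrying; the sync flag and the timeout are the caller's choice.
 */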
868
869 /**
870 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
871 * @zone: A zone to check if it is heavily congested
872 * @sync: SYNC or ASYNC IO
873 * @timeout: timeout in jiffies
874 *
875  * If a backing_dev (any backing_dev) is congested and the given @zone has
876  * experienced recent congestion, this waits for up to @timeout
877 * jiffies for either a BDI to exit congestion of the given @sync queue
878 * or a write to complete.
879 *
880  * In the absence of zone congestion, this function calls cond_resched() to
881  * yield the processor if necessary, but it otherwise does not sleep.
882 *
883 * The return value is 0 if the sleep is for the full timeout. Otherwise,
884 * it is the number of jiffies that were still remaining when the function
885 * returned. return_value == timeout implies the function did not sleep.
886 */
887 long wait_iff_congested(struct zone *zone, int sync, long timeout)
888 {
889 long ret;
890 unsigned long start = jiffies;
891 DEFINE_WAIT(wait);
892 wait_queue_head_t *wqh = &congestion_wqh[sync];
893
894 /*
895 * If there is no congestion, or heavy congestion is not being
896 * encountered in the current zone, yield if necessary instead
897 * of sleeping on the congestion queue
898 */
899 if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
900 !zone_is_reclaim_congested(zone)) {
901 cond_resched();
902
903 /* In case we scheduled, work out time remaining */
904 ret = timeout - (jiffies - start);
905 if (ret < 0)
906 ret = 0;
907
908 goto out;
909 }
910
911 /* Sleep until uncongested or a write happens */
912 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
913 ret = io_schedule_timeout(timeout);
914 finish_wait(wqh, &wait);
915
916 out:
917 trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
918 jiffies_to_usecs(jiffies - start));
919
920 return ret;
921 }
922 EXPORT_SYMBOL(wait_iff_congested);
923
924 int pdflush_proc_obsolete(struct ctl_table *table, int write,
925 void __user *buffer, size_t *lenp, loff_t *ppos)
926 {
927 char kbuf[] = "0\n";
928
929 if (*ppos) {
930 *lenp = 0;
931 return 0;
932 }
933
934 if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
935 return -EFAULT;
936 printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
937 table->procname);
938
939 *lenp = 2;
940 *ppos += *lenp;
941 return 2;
942 }