writeback: make writeback_in_progress() take bdi_writeback instead of backing_dev_info

/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
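
/*
 * Illustrative sketch, not part of the original header: wb_stat_error()
 * bounds how far a cheap wb_stat() read can drift from the true value, so
 * callers near a threshold typically fall back to the exact (and more
 * expensive) wb_stat_sum().  The helper name below is hypothetical.
 */
static inline s64 example_wb_reclaimable(struct bdi_writeback *wb, s64 thresh)
{
	s64 reclaimable = wb_stat(wb, WB_RECLAIMABLE);

	/* only pay for the exact sum when the estimate is too close to call */
	if (reclaimable < thresh + wb_stat_error(wb))
		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
	return reclaimable;
}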

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:  Don't write pages back
 * BDI_CAP_NO_ACCT_WB:    Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:   Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

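/*
 * Illustrative sketch, not part of the original header: a ramfs-style
 * filesystem whose pages can never be written back opts out of dirty
 * accounting and writeback with the combined capability mask.  The helper
 * name below is hypothetical.
 */
static inline void example_mark_bdi_no_writeback(struct backing_dev_info *bdi)
{
	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
}
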
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}
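
/*
 * Illustrative sketch, not part of the original header: a typical caller
 * only kicks the flusher when no writeback is already in flight.  The
 * helper name and the fixed page count are hypothetical.
 */
static inline void example_kick_wb(struct bdi_writeback *wb)
{
	if (!writeback_in_progress(wb))
		wb_start_writeback(wb, 1024, true, WB_REASON_BACKGROUND);
}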

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void __inode_attach_wb(struct inode *inode, struct page *page);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Test whether @inode has both.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_type->fs_flags & FS_CGROUP_WRITEBACK);
}

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

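/*
 * Illustrative sketch, not part of the original header: the usual
 * get/use/put pattern around wb_get_create_current().  The helper name
 * and the use of wb_wakeup_delayed() as the "use" step are hypothetical.
 */
static inline void example_poke_current_wb(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;

	wb = wb_get_create_current(bdi, GFP_ATOMIC);
	if (wb) {
		wb_wakeup_delayed(wb);
		wb_put(wb);
	}
}
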
/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return inode->i_wb;
}

struct wb_iter {
	int			start_blkcg_id;
	struct radix_tree_iter	tree_iter;
	void			**slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
						   struct backing_dev_info *bdi)
{
	struct radix_tree_iter *titer = &iter->tree_iter;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (iter->start_blkcg_id >= 0) {
		iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
		iter->start_blkcg_id = -1;
	} else {
		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
	}

	if (!iter->slot)
		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
	if (iter->slot)
		return *iter->slot;
	return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
						   struct backing_dev_info *bdi,
						   int start_blkcg_id)
{
	iter->start_blkcg_id = start_blkcg_id;

	if (start_blkcg_id)
		return __wb_iter_next(iter, bdi);
	else
		return &bdi->wb;
}

/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_blkcg_id: blkcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * blkcg ID order starting from @start_blkcg_id.  @iter is struct wb_iter
 * to be used as temp storage during iteration.  rcu_read_lock() must be
 * held throughout iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id);	\
	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))

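/*
 * Illustrative sketch, not part of the original header: walking every wb
 * of a bdi with bdi_for_each_wb().  rcu_read_lock() must be held for the
 * whole walk.  The helper name is hypothetical.
 */
static inline int example_count_dirty_wbs(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;
	struct wb_iter iter;
	int nr = 0;

	rcu_read_lock();
	bdi_for_each_wb(wb, bdi, &iter, 0)
		if (wb_has_dirty_io(wb))
			nr++;
	rcu_read_unlock();
	return nr;
}
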
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	return bdi->wb.congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
}

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
	int		next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((iter)->next_id = (start_blkcg_id);			\
	     ({	(wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

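/*
 * Illustrative sketch, not part of the original header: best-effort I/O
 * such as readahead typically backs off while the inode's wb is congested.
 * The helper name is hypothetical.
 */
static inline bool example_skip_optional_read(struct inode *inode)
{
	return inode_read_congested(inode);
}
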
static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

#endif	/* _LINUX_BACKING_DEV_H */