/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
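
/*
 * Example (illustrative sketch): reading writeback stats.  wb_stat() is a
 * cheap, approximate read that may deviate from the true value by up to
 * wb_stat_error() because updates are batched per-CPU; wb_stat_sum() folds
 * in all per-CPU deltas for an exact but costlier answer.  "limit" below is
 * a hypothetical caller-side threshold; the pattern mirrors how
 * mm/page-writeback.c falls back to the precise sum near its limits.
 *
 *	s64 reclaimable = wb_stat(wb, WB_RECLAIMABLE);
 *
 *	if (reclaimable + wb_stat_error(wb) >= limit)
 *		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
 */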
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines all three as a
 * single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must be held stable during writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
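
/*
 * Example (illustrative sketch): a RAM-backed filesystem whose pages never
 * need writing back would opt out of dirty accounting and writeback in one
 * go.  The identifier example_bdi is hypothetical; noop_backing_dev_info
 * below is set up the same way.
 *
 *	static struct backing_dev_info example_bdi = {
 *		.name		= "example",
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 *	};
 */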

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);
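
/*
 * Example (illustrative sketch): a reclaim-style loop backing off while
 * the device is congested.  BLK_RW_ASYNC and HZ/50 mirror how callers such
 * as mm/vmscan.c typically invoke congestion_wait(); the loop itself is
 * hypothetical.
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */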

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	io_schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Test whether @inode has both.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
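
/*
 * Example (illustrative sketch): a filesystem opts in to cgroup writeback
 * from its fill_super path by setting SB_I_CGROUPWB, the way ext2 does.
 * The function name example_fill_super is hypothetical.
 *
 *	static int example_fill_super(struct super_block *sb, void *data,
 *				      int silent)
 *	{
 *		sb->s_iflags |= SB_I_CGROUPWB;
 *		...
 *	}
 */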

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
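
/*
 * Example (illustrative sketch): a dirtying path grabbing the wb for
 * %current and dropping the reference when done.  wb_put() comes from
 * backing-dev-defs.h; do_account() is hypothetical.
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	if (wb) {
 *		do_account(wb);
 *		wb_put(wb);
 *	}
 */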

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during the transaction.  IRQs may or may not
 * be disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}
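
/*
 * Example (illustrative sketch): the begin/end pair bracketing a short,
 * non-sleeping access to the inode's wb.  update_stats() is hypothetical.
 *
 *	struct bdi_writeback *wb;
 *	bool locked;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &locked);
 *	update_stats(wb);
 *	unlocked_inode_to_wb_end(inode, locked);
 */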

struct wb_iter {
	int			start_blkcg_id;
	struct radix_tree_iter	tree_iter;
	void			**slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
						   struct backing_dev_info *bdi)
{
	struct radix_tree_iter *titer = &iter->tree_iter;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (iter->start_blkcg_id >= 0) {
		iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
		iter->start_blkcg_id = -1;
	} else {
		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
	}

	if (!iter->slot)
		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
	if (iter->slot)
		return *iter->slot;
	return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
						   struct backing_dev_info *bdi,
						   int start_blkcg_id)
{
	iter->start_blkcg_id = start_blkcg_id;

	return __wb_iter_next(iter, bdi);
}

/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_blkcg_id: blkcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * blkcg ID order starting from @start_blkcg_id.  @iter is struct wb_iter
 * to be used as temp storage during iteration.  rcu_read_lock() must be
 * held throughout the iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id);	\
	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
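
/*
 * Example (illustrative sketch): waking every wb of a bdi with dirty data
 * for background writeback.  The surrounding code is hypothetical; note
 * the required rcu_read_lock() around the walk.
 *
 *	struct bdi_writeback *wb;
 *	struct wb_iter iter;
 *
 *	rcu_read_lock();
 *	bdi_for_each_wb(wb, bdi, &iter, 0)
 *		if (wb_has_dirty_io(wb))
 *			wb_start_background_writeback(wb);
 *	rcu_read_unlock();
 */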

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
	int		next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((iter)->next_id = (start_blkcg_id);			\
	     ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
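
/*
 * Example (illustrative sketch): readahead-style code skipping optional
 * I/O while the device is backed up, similar in spirit to the congestion
 * checks in mm/readahead.c.  The function name example_try_readahead is
 * hypothetical.
 *
 *	static void example_try_readahead(struct inode *inode)
 *	{
 *		if (inode_read_congested(inode))
 *			return;
 *		...
 *	}
 */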

#endif	/* _LINUX_BACKING_DEV_H */