/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
        BDI_async_congested,    /* The async (write) queue is getting full */
        BDI_sync_congested,     /* The sync queue is getting full */
        BDI_registered,         /* bdi_register() was done */
        BDI_writeback_running,  /* Writeback is in progress */
};

typedef int (congested_fn)(void *, int);

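/*
 * A stacking driver (in the md/dm style) can answer congestion queries for
 * its component devices by installing a congested_fn.  The sketch below is
 * only an illustration; "struct example_dev" and its "lower_bdi" member are
 * hypothetical, not part of this header:
 *
 *	static int example_congested(void *data, int bdi_bits)
 *	{
 *		struct example_dev *dev = data;
 *
 *		return bdi_congested(dev->lower_bdi, bdi_bits);
 *	}
 *
 *	dev->bdi.congested_fn = example_congested;
 *	dev->bdi.congested_data = dev;
 */
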
enum bdi_stat_item {
        BDI_RECLAIMABLE,
        BDI_WRITEBACK,
        BDI_DIRTIED,
        BDI_WRITTEN,
        NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

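/*
 * BDI_STAT_BATCH bounds how far each per-CPU counter may drift before it is
 * folded into the global count.  Worked example: with nr_cpu_ids == 4 the
 * batch is 8 * (1 + ilog2(4)) = 8 * 3 = 24, so a cheap bdi_stat() read can
 * be off by at most nr_cpu_ids * 24 = 96 from the exact sum (this is what
 * bdi_stat_error() below reports).
 */
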
struct bdi_writeback {
        struct backing_dev_info *bdi;   /* our parent bdi */

        unsigned long last_old_flush;   /* last old data flush */

        struct delayed_work dwork;      /* work item used for writeback */
        struct list_head b_dirty;       /* dirty inodes */
        struct list_head b_io;          /* parked for writeback */
        struct list_head b_more_io;     /* parked for more writeback */
        spinlock_t list_lock;           /* protects the b_* lists */
};

struct backing_dev_info {
        struct list_head bdi_list;
        unsigned long ra_pages;         /* max readahead in PAGE_CACHE_SIZE units */
        unsigned long state;            /* Always use atomic bitops on this */
        unsigned int capabilities;      /* Device capabilities */
        congested_fn *congested_fn;     /* Function pointer if device is md/dm */
        void *congested_data;           /* Pointer to aux data for congested func */

        char *name;

        struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

        unsigned long bw_time_stamp;    /* last time write bw is updated */
        unsigned long dirtied_stamp;    /* pages dirtied at bw_time_stamp */
        unsigned long written_stamp;    /* pages written at bw_time_stamp */
        unsigned long write_bandwidth;  /* the estimated write bandwidth */
        unsigned long avg_write_bandwidth; /* further smoothed write bw */

        /*
         * The base dirty throttle rate, recalculated every 200ms.
         * All of the bdi's dirtying tasks have their dirty rate curbed to
         * stay under it.  @dirty_ratelimit tracks the estimated
         * @balanced_dirty_ratelimit in small steps and is much smoother and
         * more stable than the latter.
         */
        unsigned long dirty_ratelimit;
        unsigned long balanced_dirty_ratelimit;

        struct fprop_local_percpu completions;
        int dirty_exceeded;

        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;

        struct bdi_writeback wb;        /* default writeback info for this bdi */
        spinlock_t wb_lock;             /* protects work_list & wb.dwork scheduling */

        struct list_head work_list;

        struct device *dev;

        struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
        struct dentry *debug_stats;
#endif
};

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void bdi_writeback_workfn(struct work_struct *work);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);

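/*
 * Typical registration lifecycle, sketched for illustration only (error
 * handling trimmed; the "example" name and capability choice are
 * hypothetical):
 *
 *	static struct backing_dev_info example_bdi;
 *	int err;
 *
 *	err = bdi_setup_and_register(&example_bdi, "example", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	...
 *	bdi_destroy(&example_bdi);
 *
 * bdi_setup_and_register() is a convenience wrapper around bdi_init() and
 * bdi_register(); callers that need a specific parent device or name call
 * bdi_init() plus bdi_register() (or bdi_register_dev()) themselves.
 */
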
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
        return !list_empty(&wb->b_dirty) ||
               !list_empty(&wb->b_io) ||
               !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item, s64 amount)
{
        __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        s64 sum;
        unsigned long flags;

        local_irq_save(flags);
        sum = __bdi_stat_sum(bdi, item);
        local_irq_restore(flags);

        return sum;
}

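/*
 * Usage notes (an illustrative sketch, not a quote of mm/ code): the
 * double-underscore variants assume the caller already has interrupts
 * disabled, for example while holding an irq-safe lock:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	__inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
 *	spin_unlock_irq(&mapping->tree_lock);
 *
 * bdi_stat() is a cheap but possibly stale read (error bounded by
 * bdi_stat_error()), while bdi_stat_sum() folds in every CPU's delta for an
 * exact but more expensive value.
 */
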
extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * BDI_STAT_BATCH;
#else
        return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines the three into
 * a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 *
 * BDI_CAP_STABLE_WRITES:  The device requires that pages are not modified
 *                         while under writeback ("stable pages").
 *
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 */
#define BDI_CAP_NO_ACCT_DIRTY   0x00000001
#define BDI_CAP_NO_WRITEBACK    0x00000002
#define BDI_CAP_MAP_COPY        0x00000004
#define BDI_CAP_MAP_DIRECT      0x00000008
#define BDI_CAP_READ_MAP        0x00000010
#define BDI_CAP_WRITE_MAP       0x00000020
#define BDI_CAP_EXEC_MAP        0x00000040
#define BDI_CAP_NO_ACCT_WB      0x00000080
#define BDI_CAP_SWAP_BACKED     0x00000100
#define BDI_CAP_STABLE_WRITES   0x00000200
#define BDI_CAP_STRICTLIMIT     0x00000400

#define BDI_CAP_VMFLAGS \
        (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
        (BDI_CAP_READ_MAP != VM_MAYREAD || \
         BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
         BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

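/*
 * Example in the style of a memory-backed filesystem such as ramfs (the
 * exact flag combination is illustrative): a bdi whose pages are never
 * written back or accounted as dirty, but which still permits !MMU
 * mappings.  ra_pages is zero because there is nothing to read ahead from.
 *
 *	static struct backing_dev_info example_backing_dev_info = {
 *		.name		= "example",
 *		.ra_pages	= 0,
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *				  BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
 *				  BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP |
 *				  BDI_CAP_EXEC_MAP,
 *	};
 */
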
extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, bdi_bits);
        return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << BDI_sync_congested) |
                                  (1 << BDI_async_congested));
}

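/*
 * Callers issuing optional I/O (readahead, background writeout) can use the
 * helpers above to back off while the device queue is filling up.  A sketch,
 * not a quote of an existing caller:
 *
 *	if (bdi_write_congested(mapping->backing_dev_info))
 *		return;
 *
 * Stacked devices are handled transparently: bdi_congested() defers to
 * ->congested_fn when one is installed.
 */
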
enum {
        BLK_RW_ASYNC    = 0,
        BLK_RW_SYNC     = 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos);

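/*
 * A device marks itself congested when its queue fills up and clears the
 * state as requests drain; reclaim and writeback throttle on it.  Both sides
 * sketched for illustration (the surrounding driver code is hypothetical):
 *
 *	set_bdi_congested(bdi, BLK_RW_ASYNC);
 *	...
 *	clear_bdi_congested(bdi, BLK_RW_ASYNC);
 *
 * and on the throttling side, sleeping up to 20ms for some device to become
 * uncongested:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ / 50);
 *
 * Timeouts are in jiffies; congestion_wait() may return early when a
 * congested bdi is cleared.
 */
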
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
        /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
        return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
                                      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
        return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
        return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
        return bdi_cap_swap_backed(mapping->backing_dev_info);
}

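/*
 * The mapping_cap_* wrappers are what the VM consults when deciding whether
 * a page should be accounted against its mapping's bdi.  An illustrative
 * sketch of dirty accounting (not a quote of mm/page-writeback.c):
 *
 *	if (mapping_cap_account_dirty(mapping)) {
 *		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 *		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
 *	}
 */
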
static inline int bdi_sched_wait(void *word)
{
        schedule();
        return 0;
}

#endif  /* _LINUX_BACKING_DEV_H */