/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */
	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
			  * IO completion of other buffers in the page
			  */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};
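
/*
 * Any bit at or above BH_PrivateStart is free for private use by other
 * subsystems. As an illustrative sketch of the pattern (jbd2 does this
 * for real with BH_JBD and friends in include/linux/jbd2.h; the names
 * below are hypothetical), a client would write:
 *
 *	enum myfs_state_bits {
 *		BH_MyFS_Journaled = BH_PrivateStart,
 *		BH_MyFS_Shadow,
 *	};
 *	BUFFER_FNS(MyFS_Journaled, myfs_journaled)
 *
 * using the BUFFER_FNS() generator defined below.
 */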

#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
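
/*
 * For example, BUFFER_FNS(Dirty, dirty) expands to:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */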

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
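
/*
 * Typical use of the generated helpers, sketched after the pattern of
 * end_buffer_read_sync() (the function name below is hypothetical):
 *
 *	static void myfs_end_read(struct buffer_head *bh, int uptodate)
 *	{
 *		if (uptodate)
 *			set_buffer_uptodate(bh);
 *		else
 *			clear_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *		put_bh(bh);
 *	}
 */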

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
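
/*
 * The buffers on a page form a circular singly linked list through
 * b_this_page, so the canonical way to visit all of them is (a sketch;
 * the caller must know the page actually has buffers, e.g. by checking
 * page_has_buffers() first, and must hold the page stably):
 *
 *	struct buffer_head *bh, *head;
 *
 *	if (page_has_buffers(page)) {
 *		bh = head = page_buffers(page);
 *		do {
 *			operate_on(bh);		(hypothetical per-bh work)
 *			bh = bh->b_this_page;
 *		} while (bh != head);
 *	}
 */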

void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int rw);
void write_dirty_buffer(struct buffer_head *bh, int rw);
int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
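
/*
 * bh_uptodate_or_lock() and bh_submit_read() are designed to be paired:
 * read the buffer from disk only if it is not already uptodate. A minimal
 * sketch (bh_uptodate_or_lock() returns false with the buffer locked when
 * a read is needed, and bh_submit_read() returns 0 on success):
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);
 *		if (err)
 *			return err;
 *	}
 *	bh->b_data is now valid
 */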

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int block_read_full_page(struct page *, get_block_t *);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err == -EAGAIN)
		return VM_FAULT_RETRY;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
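
/*
 * A ->page_mkwrite() implementation built on block_page_mkwrite() uses the
 * helper above to translate the errno into a VM_FAULT_* code. A minimal
 * sketch (the filesystem callback and its get_block are hypothetical):
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vma, vmf, myfs_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */
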
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
		struct buffer_head *head)
{
	page_cache_get(page);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
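
/*
 * Typical metadata read through sb_bread(): a NULL return means the block
 * was unreadable, and a successful return carries a reference the caller
 * must drop with brelse(). A minimal sketch:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 */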

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
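
/*
 * map_bh() is how a get_block_t implementation typically publishes the
 * physical mapping it found. A minimal sketch, assuming a hypothetical
 * myfs_find_block() that maps a logical to a physical block number:
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh_result, int create)
 *	{
 *		sector_t pblock = myfs_find_block(inode, iblock);
 *
 *		if (!pblock)
 *			return -EIO;
 *		map_bh(bh_result, inode->i_sb, pblock);
 *		return 0;
 *	}
 *
 * A real implementation would also allocate a new block when create is
 * set and mark the result with set_buffer_new().
 */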

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
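
/*
 * Updating buffer contents usually follows the lock, modify, unlock,
 * dirty pattern, with sync_dirty_buffer() added only when write-through
 * behaviour is required. A sketch (offset, src and len are assumed to be
 * in range for the buffer):
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + offset, src, len);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 */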

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * The page cache is allocated from movable area so that it can be migrated.
 * It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */