fs/xfs/linux-2.6/xfs_buf.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
static struct shrinker xfs_buf_shake = {
	.shrink = xfsbufd_wakeup,
	.seeks = DEFAULT_SEEKS,
};

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

ce8e922c 49#ifdef XFS_BUF_TRACE
1da177e4 50void
ce8e922c
NS
51xfs_buf_trace(
52 xfs_buf_t *bp,
1da177e4
LT
53 char *id,
54 void *data,
55 void *ra)
56{
ce8e922c
NS
57 ktrace_enter(xfs_buf_trace_buf,
58 bp, id,
59 (void *)(unsigned long)bp->b_flags,
60 (void *)(unsigned long)bp->b_hold.counter,
d63f154a 61 (void *)(unsigned long)bp->b_sema.count,
1da177e4
LT
62 (void *)current,
63 data, ra,
ce8e922c
NS
64 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
65 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
66 (void *)(unsigned long)bp->b_buffer_length,
1da177e4
LT
67 NULL, NULL, NULL, NULL, NULL);
68}
ce8e922c
NS
69ktrace_t *xfs_buf_trace_buf;
70#define XFS_BUF_TRACE_SIZE 4096
71#define XB_TRACE(bp, id, data) \
72 xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
1da177e4 73#else
ce8e922c 74#define XB_TRACE(bp, id, data) do { } while (0)
1da177e4
LT
75#endif
76
ce8e922c
NS
77#ifdef XFS_BUF_LOCK_TRACKING
78# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
79# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
80# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
1da177e4 81#else
ce8e922c
NS
82# define XB_SET_OWNER(bp) do { } while (0)
83# define XB_CLEAR_OWNER(bp) do { } while (0)
84# define XB_GET_OWNER(bp) do { } while (0)
1da177e4
LT
85#endif
86
ce8e922c
NS
87#define xb_to_gfp(flags) \
88 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
89 ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
1da177e4 90
ce8e922c
NS
91#define xb_to_km(flags) \
92 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
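
/*
 * For example (illustrative): a read-ahead buffer allocates its pages
 * with __GFP_NORETRY | __GFP_NOWARN so that failures are tolerated
 * silently, an XBF_DONT_BLOCK buffer uses GFP_NOFS / KM_NOFS to avoid
 * recursing into the filesystem under memory pressure, and everything
 * else falls back to GFP_KERNEL / KM_SLEEP.
 */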
1da177e4 93
ce8e922c
NS
94#define xfs_buf_allocate(flags) \
95 kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
96#define xfs_buf_deallocate(bp) \
97 kmem_zone_free(xfs_buf_zone, (bp));
1da177e4
LT
98
/*
 * Page Region interfaces.
 *
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 *
 * Each such region is "bytes per page / bits per long" bytes long.
 *
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
112#if (BITS_PER_LONG == 32)
113#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
114#elif (BITS_PER_LONG == 64)
115#define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
116#else
117#error BITS_PER_LONG must be 32 or 64
118#endif
119#define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
120#define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
121#define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
122
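/*
 * Worked example (illustrative, not from the original source): with
 * 4096-byte pages and 64-bit longs, NBPPR is 64 bytes and PRSHIFT is 6,
 * so each page is tracked as 64 regions of 64 bytes.  A 512-byte block
 * at offset 1024 within a page spans regions BTOPR(1024) = 16 through
 * BTOPRT(1024 + 512 - 1) = 23, and page_region_mask(1024, 512) returns
 * a mask with exactly those eight bits set.  Once every region of a page
 * has been marked by set_page_region(), page->private equals ~0UL and
 * the page is flagged uptodate.
 */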
123STATIC unsigned long
124page_region_mask(
125 size_t offset,
126 size_t length)
127{
128 unsigned long mask;
129 int first, final;
130
131 first = BTOPR(offset);
132 final = BTOPRT(offset + length - 1);
133 first = min(first, final);
134
	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first + 1);
	mask >>= BITS_PER_LONG - (final + 1);
138
139 ASSERT(offset + length <= PAGE_CACHE_SIZE);
140 ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
141
142 return mask;
143}
144
7989cb8e 145STATIC_INLINE void
1da177e4
LT
146set_page_region(
147 struct page *page,
148 size_t offset,
149 size_t length)
150{
4c21e2f2
HD
151 set_page_private(page,
152 page_private(page) | page_region_mask(offset, length));
153 if (page_private(page) == ~0UL)
1da177e4
LT
154 SetPageUptodate(page);
155}
156
7989cb8e 157STATIC_INLINE int
1da177e4
LT
158test_page_region(
159 struct page *page,
160 size_t offset,
161 size_t length)
162{
163 unsigned long mask = page_region_mask(offset, length);
164
4c21e2f2 165 return (mask && (page_private(page) & mask) == mask);
1da177e4
LT
166}
167
168/*
ce8e922c 169 * Mapping of multi-page buffers into contiguous virtual space
1da177e4
LT
170 */
171
172typedef struct a_list {
173 void *vm_addr;
174 struct a_list *next;
175} a_list_t;
176
7989cb8e
DC
177static a_list_t *as_free_head;
178static int as_list_len;
179static DEFINE_SPINLOCK(as_lock);
1da177e4
LT
180
181/*
ce8e922c 182 * Try to batch vunmaps because they are costly.
1da177e4
LT
183 */
184STATIC void
185free_address(
186 void *addr)
187{
188 a_list_t *aentry;
189
7f015072
JF
190#ifdef CONFIG_XEN
191 /*
192 * Xen needs to be able to make sure it can get an exclusive
193 * RO mapping of pages it wants to turn into a pagetable. If
194 * a newly allocated page is also still being vmap()ed by xfs,
195 * it will cause pagetable construction to fail. This is a
196 * quick workaround to always eagerly unmap pages so that Xen
197 * is happy.
198 */
199 vunmap(addr);
200 return;
201#endif
202
7b04d717 203 aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
1da177e4
LT
204 if (likely(aentry)) {
205 spin_lock(&as_lock);
206 aentry->next = as_free_head;
207 aentry->vm_addr = addr;
208 as_free_head = aentry;
209 as_list_len++;
210 spin_unlock(&as_lock);
211 } else {
212 vunmap(addr);
213 }
214}
215
216STATIC void
217purge_addresses(void)
218{
219 a_list_t *aentry, *old;
220
221 if (as_free_head == NULL)
222 return;
223
224 spin_lock(&as_lock);
225 aentry = as_free_head;
226 as_free_head = NULL;
227 as_list_len = 0;
228 spin_unlock(&as_lock);
229
230 while ((old = aentry) != NULL) {
231 vunmap(aentry->vm_addr);
232 aentry = aentry->next;
233 kfree(old);
234 }
235}
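
/*
 * Background note: vunmap() has to flush TLBs on every CPU, which is why
 * freed mappings are parked on as_free_head instead of being torn down
 * one at a time.  The list is drained when it grows past 64 entries (see
 * _xfs_buf_map_pages) and from the xfsbufd thread; the CONFIG_XEN case
 * above opts out of the batching entirely.
 */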
236
237/*
ce8e922c 238 * Internal xfs_buf_t object manipulation
1da177e4
LT
239 */
240
241STATIC void
ce8e922c
NS
242_xfs_buf_initialize(
243 xfs_buf_t *bp,
1da177e4 244 xfs_buftarg_t *target,
204ab25f 245 xfs_off_t range_base,
1da177e4 246 size_t range_length,
ce8e922c 247 xfs_buf_flags_t flags)
1da177e4
LT
248{
249 /*
ce8e922c 250 * We don't want certain flags to appear in b_flags.
1da177e4 251 */
ce8e922c
NS
252 flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
253
254 memset(bp, 0, sizeof(xfs_buf_t));
255 atomic_set(&bp->b_hold, 1);
b4dd330b 256 init_completion(&bp->b_iowait);
ce8e922c
NS
257 INIT_LIST_HEAD(&bp->b_list);
258 INIT_LIST_HEAD(&bp->b_hash_list);
259 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
260 XB_SET_OWNER(bp);
261 bp->b_target = target;
262 bp->b_file_offset = range_base;
1da177e4
LT
263 /*
264 * Set buffer_length and count_desired to the same value initially.
265 * I/O routines should use count_desired, which will be the same in
266 * most cases but may be reset (e.g. XFS recovery).
267 */
ce8e922c
NS
268 bp->b_buffer_length = bp->b_count_desired = range_length;
269 bp->b_flags = flags;
270 bp->b_bn = XFS_BUF_DADDR_NULL;
271 atomic_set(&bp->b_pin_count, 0);
272 init_waitqueue_head(&bp->b_waiters);
273
274 XFS_STATS_INC(xb_create);
275 XB_TRACE(bp, "initialize", target);
1da177e4
LT
276}
277
278/*
ce8e922c
NS
279 * Allocate a page array capable of holding a specified number
280 * of pages, and point the page buf at it.
1da177e4
LT
281 */
282STATIC int
ce8e922c
NS
283_xfs_buf_get_pages(
284 xfs_buf_t *bp,
1da177e4 285 int page_count,
ce8e922c 286 xfs_buf_flags_t flags)
1da177e4
LT
287{
288 /* Make sure that we have a page list */
ce8e922c
NS
289 if (bp->b_pages == NULL) {
290 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
291 bp->b_page_count = page_count;
292 if (page_count <= XB_PAGES) {
293 bp->b_pages = bp->b_page_array;
1da177e4 294 } else {
ce8e922c
NS
295 bp->b_pages = kmem_alloc(sizeof(struct page *) *
296 page_count, xb_to_km(flags));
297 if (bp->b_pages == NULL)
1da177e4
LT
298 return -ENOMEM;
299 }
ce8e922c 300 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
1da177e4
LT
301 }
302 return 0;
303}
304
305/*
ce8e922c 306 * Frees b_pages if it was allocated.
1da177e4
LT
307 */
308STATIC void
ce8e922c 309_xfs_buf_free_pages(
1da177e4
LT
310 xfs_buf_t *bp)
311{
ce8e922c 312 if (bp->b_pages != bp->b_page_array) {
f0e2d93c 313 kmem_free(bp->b_pages);
1da177e4
LT
314 }
315}
316
/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
324void
ce8e922c 325xfs_buf_free(
1da177e4
LT
326 xfs_buf_t *bp)
327{
ce8e922c 328 XB_TRACE(bp, "free", 0);
1da177e4 329
ce8e922c 330 ASSERT(list_empty(&bp->b_hash_list));
1da177e4 331
1fa40b01 332 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
1da177e4
LT
333 uint i;
334
ce8e922c
NS
335 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
336 free_address(bp->b_addr - bp->b_offset);
1da177e4 337
948ecdb4
NS
338 for (i = 0; i < bp->b_page_count; i++) {
339 struct page *page = bp->b_pages[i];
340
1fa40b01
CH
341 if (bp->b_flags & _XBF_PAGE_CACHE)
342 ASSERT(!PagePrivate(page));
948ecdb4
NS
343 page_cache_release(page);
344 }
ce8e922c 345 _xfs_buf_free_pages(bp);
1da177e4
LT
346 }
347
ce8e922c 348 xfs_buf_deallocate(bp);
1da177e4
LT
349}
350
/*
 * Finds all pages for the buffer in question and builds its page list.
 */
354STATIC int
ce8e922c 355_xfs_buf_lookup_pages(
1da177e4
LT
356 xfs_buf_t *bp,
357 uint flags)
358{
ce8e922c
NS
359 struct address_space *mapping = bp->b_target->bt_mapping;
360 size_t blocksize = bp->b_target->bt_bsize;
361 size_t size = bp->b_count_desired;
1da177e4 362 size_t nbytes, offset;
ce8e922c 363 gfp_t gfp_mask = xb_to_gfp(flags);
1da177e4
LT
364 unsigned short page_count, i;
365 pgoff_t first;
204ab25f 366 xfs_off_t end;
1da177e4
LT
367 int error;
368
ce8e922c
NS
369 end = bp->b_file_offset + bp->b_buffer_length;
370 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
1da177e4 371
ce8e922c 372 error = _xfs_buf_get_pages(bp, page_count, flags);
1da177e4
LT
373 if (unlikely(error))
374 return error;
ce8e922c 375 bp->b_flags |= _XBF_PAGE_CACHE;
1da177e4 376
ce8e922c
NS
377 offset = bp->b_offset;
378 first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
1da177e4 379
ce8e922c 380 for (i = 0; i < bp->b_page_count; i++) {
1da177e4
LT
381 struct page *page;
382 uint retries = 0;
383
384 retry:
385 page = find_or_create_page(mapping, first + i, gfp_mask);
386 if (unlikely(page == NULL)) {
ce8e922c
NS
387 if (flags & XBF_READ_AHEAD) {
388 bp->b_page_count = i;
6ab455ee
CH
389 for (i = 0; i < bp->b_page_count; i++)
390 unlock_page(bp->b_pages[i]);
1da177e4
LT
391 return -ENOMEM;
392 }
393
394 /*
395 * This could deadlock.
396 *
397 * But until all the XFS lowlevel code is revamped to
398 * handle buffer allocation failures we can't do much.
399 */
400 if (!(++retries % 100))
401 printk(KERN_ERR
402 "XFS: possible memory allocation "
403 "deadlock in %s (mode:0x%x)\n",
34a622b2 404 __func__, gfp_mask);
1da177e4 405
ce8e922c 406 XFS_STATS_INC(xb_page_retries);
23ea4032 407 xfsbufd_wakeup(0, gfp_mask);
3fcfab16 408 congestion_wait(WRITE, HZ/50);
1da177e4
LT
409 goto retry;
410 }
411
ce8e922c 412 XFS_STATS_INC(xb_page_found);
1da177e4
LT
413
414 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
415 size -= nbytes;
416
948ecdb4 417 ASSERT(!PagePrivate(page));
1da177e4
LT
418 if (!PageUptodate(page)) {
419 page_count--;
6ab455ee
CH
420 if (blocksize >= PAGE_CACHE_SIZE) {
421 if (flags & XBF_READ)
422 bp->b_flags |= _XBF_PAGE_LOCKED;
423 } else if (!PagePrivate(page)) {
1da177e4
LT
424 if (test_page_region(page, offset, nbytes))
425 page_count++;
426 }
427 }
428
ce8e922c 429 bp->b_pages[i] = page;
1da177e4
LT
430 offset = 0;
431 }
432
6ab455ee
CH
433 if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
434 for (i = 0; i < bp->b_page_count; i++)
435 unlock_page(bp->b_pages[i]);
436 }
437
ce8e922c
NS
438 if (page_count == bp->b_page_count)
439 bp->b_flags |= XBF_DONE;
1da177e4 440
ce8e922c 441 XB_TRACE(bp, "lookup_pages", (long)page_count);
1da177e4
LT
442 return error;
443}
444
/*
 * Map buffer into kernel address-space if necessary.
 */
448STATIC int
ce8e922c 449_xfs_buf_map_pages(
1da177e4
LT
450 xfs_buf_t *bp,
451 uint flags)
452{
453 /* A single page buffer is always mappable */
ce8e922c
NS
454 if (bp->b_page_count == 1) {
455 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
456 bp->b_flags |= XBF_MAPPED;
457 } else if (flags & XBF_MAPPED) {
1da177e4
LT
458 if (as_list_len > 64)
459 purge_addresses();
ce8e922c
NS
460 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
461 VM_MAP, PAGE_KERNEL);
462 if (unlikely(bp->b_addr == NULL))
1da177e4 463 return -ENOMEM;
ce8e922c
NS
464 bp->b_addr += bp->b_offset;
465 bp->b_flags |= XBF_MAPPED;
1da177e4
LT
466 }
467
468 return 0;
469}
470
471/*
472 * Finding and Reading Buffers
473 */
474
/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode.  The buffer is returned
 * locked.  If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked.  No I/O is implied by this call.
 */
483xfs_buf_t *
ce8e922c 484_xfs_buf_find(
1da177e4 485 xfs_buftarg_t *btp, /* block device target */
204ab25f 486 xfs_off_t ioff, /* starting offset of range */
1da177e4 487 size_t isize, /* length of range */
ce8e922c
NS
488 xfs_buf_flags_t flags,
489 xfs_buf_t *new_bp)
1da177e4 490{
204ab25f 491 xfs_off_t range_base;
1da177e4
LT
492 size_t range_length;
493 xfs_bufhash_t *hash;
ce8e922c 494 xfs_buf_t *bp, *n;
1da177e4
LT
495
496 range_base = (ioff << BBSHIFT);
497 range_length = (isize << BBSHIFT);
498
499 /* Check for IOs smaller than the sector size / not sector aligned */
ce8e922c 500 ASSERT(!(range_length < (1 << btp->bt_sshift)));
204ab25f 501 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
1da177e4
LT
502
503 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
504
505 spin_lock(&hash->bh_lock);
506
ce8e922c
NS
507 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
508 ASSERT(btp == bp->b_target);
509 if (bp->b_file_offset == range_base &&
510 bp->b_buffer_length == range_length) {
1da177e4 511 /*
ce8e922c 512 * If we look at something, bring it to the
1da177e4
LT
513 * front of the list for next time.
514 */
ce8e922c
NS
515 atomic_inc(&bp->b_hold);
516 list_move(&bp->b_hash_list, &hash->bh_list);
1da177e4
LT
517 goto found;
518 }
519 }
520
521 /* No match found */
ce8e922c
NS
522 if (new_bp) {
523 _xfs_buf_initialize(new_bp, btp, range_base,
1da177e4 524 range_length, flags);
ce8e922c
NS
525 new_bp->b_hash = hash;
526 list_add(&new_bp->b_hash_list, &hash->bh_list);
1da177e4 527 } else {
ce8e922c 528 XFS_STATS_INC(xb_miss_locked);
1da177e4
LT
529 }
530
531 spin_unlock(&hash->bh_lock);
ce8e922c 532 return new_bp;
1da177e4
LT
533
534found:
535 spin_unlock(&hash->bh_lock);
536
537 /* Attempt to get the semaphore without sleeping,
538 * if this does not work then we need to drop the
539 * spinlock and do a hard attempt on the semaphore.
540 */
ce8e922c
NS
541 if (down_trylock(&bp->b_sema)) {
542 if (!(flags & XBF_TRYLOCK)) {
1da177e4 543 /* wait for buffer ownership */
ce8e922c
NS
544 XB_TRACE(bp, "get_lock", 0);
545 xfs_buf_lock(bp);
546 XFS_STATS_INC(xb_get_locked_waited);
1da177e4
LT
547 } else {
548 /* We asked for a trylock and failed, no need
549 * to look at file offset and length here, we
ce8e922c
NS
550 * know that this buffer at least overlaps our
551 * buffer and is locked, therefore our buffer
552 * either does not exist, or is this buffer.
1da177e4 553 */
ce8e922c
NS
554 xfs_buf_rele(bp);
555 XFS_STATS_INC(xb_busy_locked);
556 return NULL;
1da177e4
LT
557 }
558 } else {
559 /* trylock worked */
ce8e922c 560 XB_SET_OWNER(bp);
1da177e4
LT
561 }
562
ce8e922c
NS
563 if (bp->b_flags & XBF_STALE) {
564 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
565 bp->b_flags &= XBF_MAPPED;
2f926587 566 }
ce8e922c
NS
567 XB_TRACE(bp, "got_lock", 0);
568 XFS_STATS_INC(xb_get_locked);
569 return bp;
1da177e4
LT
570}
571
572/*
ce8e922c 573 * Assembles a buffer covering the specified range.
1da177e4
LT
574 * Storage in memory for all portions of the buffer will be allocated,
575 * although backing storage may not be.
576 */
577xfs_buf_t *
ce8e922c 578xfs_buf_get_flags(
1da177e4 579 xfs_buftarg_t *target,/* target for buffer */
204ab25f 580 xfs_off_t ioff, /* starting offset of range */
1da177e4 581 size_t isize, /* length of range */
ce8e922c 582 xfs_buf_flags_t flags)
1da177e4 583{
ce8e922c 584 xfs_buf_t *bp, *new_bp;
1da177e4
LT
585 int error = 0, i;
586
ce8e922c
NS
587 new_bp = xfs_buf_allocate(flags);
588 if (unlikely(!new_bp))
1da177e4
LT
589 return NULL;
590
ce8e922c
NS
591 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
592 if (bp == new_bp) {
593 error = _xfs_buf_lookup_pages(bp, flags);
1da177e4
LT
594 if (error)
595 goto no_buffer;
596 } else {
ce8e922c
NS
597 xfs_buf_deallocate(new_bp);
598 if (unlikely(bp == NULL))
1da177e4
LT
599 return NULL;
600 }
601
ce8e922c
NS
602 for (i = 0; i < bp->b_page_count; i++)
603 mark_page_accessed(bp->b_pages[i]);
1da177e4 604
ce8e922c
NS
605 if (!(bp->b_flags & XBF_MAPPED)) {
606 error = _xfs_buf_map_pages(bp, flags);
1da177e4
LT
607 if (unlikely(error)) {
608 printk(KERN_WARNING "%s: failed to map pages\n",
34a622b2 609 __func__);
1da177e4
LT
610 goto no_buffer;
611 }
612 }
613
ce8e922c 614 XFS_STATS_INC(xb_get);
1da177e4
LT
615
616 /*
617 * Always fill in the block number now, the mapped cases can do
618 * their own overlay of this later.
619 */
ce8e922c
NS
620 bp->b_bn = ioff;
621 bp->b_count_desired = bp->b_buffer_length;
1da177e4 622
ce8e922c
NS
623 XB_TRACE(bp, "get", (unsigned long)flags);
624 return bp;
1da177e4
LT
625
626 no_buffer:
ce8e922c
NS
627 if (flags & (XBF_LOCK | XBF_TRYLOCK))
628 xfs_buf_unlock(bp);
629 xfs_buf_rele(bp);
1da177e4
LT
630 return NULL;
631}
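
/*
 * Illustrative usage sketch (kept out of the build; "target", "blkno"
 * and "numblks" are assumed caller-side variables, with blkno/numblks
 * in 512-byte basic blocks): a typical caller grabs a locked, mapped
 * buffer, works on the data, then drops the lock and reference together
 * with xfs_buf_relse().
 */
#if 0
	xfs_buf_t	*bp;

	bp = xfs_buf_get_flags(target, blkno, numblks,
			       XBF_LOCK | XBF_MAPPED);
	if (bp) {
		memset(XFS_BUF_PTR(bp), 0, BBTOB(numblks));
		xfs_buf_relse(bp);	/* unlock and drop the reference */
	}
#endif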
632
633xfs_buf_t *
634xfs_buf_read_flags(
635 xfs_buftarg_t *target,
204ab25f 636 xfs_off_t ioff,
1da177e4 637 size_t isize,
ce8e922c 638 xfs_buf_flags_t flags)
1da177e4 639{
ce8e922c
NS
640 xfs_buf_t *bp;
641
642 flags |= XBF_READ;
643
644 bp = xfs_buf_get_flags(target, ioff, isize, flags);
645 if (bp) {
646 if (!XFS_BUF_ISDONE(bp)) {
647 XB_TRACE(bp, "read", (unsigned long)flags);
648 XFS_STATS_INC(xb_get_read);
649 xfs_buf_iostart(bp, flags);
650 } else if (flags & XBF_ASYNC) {
651 XB_TRACE(bp, "read_async", (unsigned long)flags);
1da177e4
LT
652 /*
653 * Read ahead call which is already satisfied,
654 * drop the buffer
655 */
656 goto no_buffer;
657 } else {
ce8e922c 658 XB_TRACE(bp, "read_done", (unsigned long)flags);
1da177e4 659 /* We do not want read in the flags */
ce8e922c 660 bp->b_flags &= ~XBF_READ;
1da177e4
LT
661 }
662 }
663
ce8e922c 664 return bp;
1da177e4
LT
665
666 no_buffer:
ce8e922c
NS
667 if (flags & (XBF_LOCK | XBF_TRYLOCK))
668 xfs_buf_unlock(bp);
669 xfs_buf_rele(bp);
1da177e4
LT
670 return NULL;
671}
672
1da177e4 673/*
ce8e922c
NS
674 * If we are not low on memory then do the readahead in a deadlock
675 * safe manner.
1da177e4
LT
676 */
677void
ce8e922c 678xfs_buf_readahead(
1da177e4 679 xfs_buftarg_t *target,
204ab25f 680 xfs_off_t ioff,
1da177e4 681 size_t isize,
ce8e922c 682 xfs_buf_flags_t flags)
1da177e4
LT
683{
684 struct backing_dev_info *bdi;
685
ce8e922c 686 bdi = target->bt_mapping->backing_dev_info;
1da177e4
LT
687 if (bdi_read_congested(bdi))
688 return;
689
ce8e922c 690 flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
1da177e4
LT
691 xfs_buf_read_flags(target, ioff, isize, flags);
692}
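
/*
 * Illustrative usage ("target", "blkno" and "numblks" are assumed
 * caller-side variables): a caller that expects to need a range soon can
 * kick off a speculative read; the I/O is issued asynchronously with
 * XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD and the buffer is dropped on
 * completion, so a later blocking read finds the data already in the
 * page cache.
 */
#if 0
	xfs_buf_readahead(target, blkno, numblks, 0);
	/* ... some time later ... */
	bp = xfs_buf_read_flags(target, blkno, numblks, XBF_LOCK | XBF_MAPPED);
#endif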
693
694xfs_buf_t *
ce8e922c 695xfs_buf_get_empty(
1da177e4
LT
696 size_t len,
697 xfs_buftarg_t *target)
698{
ce8e922c 699 xfs_buf_t *bp;
1da177e4 700
ce8e922c
NS
701 bp = xfs_buf_allocate(0);
702 if (bp)
703 _xfs_buf_initialize(bp, target, 0, len, 0);
704 return bp;
1da177e4
LT
705}
706
707static inline struct page *
708mem_to_page(
709 void *addr)
710{
9e2779fa 711 if ((!is_vmalloc_addr(addr))) {
1da177e4
LT
712 return virt_to_page(addr);
713 } else {
714 return vmalloc_to_page(addr);
715 }
716}
717
718int
ce8e922c
NS
719xfs_buf_associate_memory(
720 xfs_buf_t *bp,
1da177e4
LT
721 void *mem,
722 size_t len)
723{
724 int rval;
725 int i = 0;
d1afb678
LM
726 unsigned long pageaddr;
727 unsigned long offset;
728 size_t buflen;
1da177e4
LT
729 int page_count;
730
d1afb678
LM
731 pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
732 offset = (unsigned long)mem - pageaddr;
733 buflen = PAGE_CACHE_ALIGN(len + offset);
734 page_count = buflen >> PAGE_CACHE_SHIFT;
1da177e4
LT
735
736 /* Free any previous set of page pointers */
ce8e922c
NS
737 if (bp->b_pages)
738 _xfs_buf_free_pages(bp);
1da177e4 739
ce8e922c
NS
740 bp->b_pages = NULL;
741 bp->b_addr = mem;
1da177e4 742
ce8e922c 743 rval = _xfs_buf_get_pages(bp, page_count, 0);
1da177e4
LT
744 if (rval)
745 return rval;
746
ce8e922c 747 bp->b_offset = offset;
d1afb678
LM
748
749 for (i = 0; i < bp->b_page_count; i++) {
750 bp->b_pages[i] = mem_to_page((void *)pageaddr);
751 pageaddr += PAGE_CACHE_SIZE;
1da177e4 752 }
1da177e4 753
d1afb678
LM
754 bp->b_count_desired = len;
755 bp->b_buffer_length = buflen;
ce8e922c 756 bp->b_flags |= XBF_MAPPED;
6ab455ee 757 bp->b_flags &= ~_XBF_PAGE_LOCKED;
1da177e4
LT
758
759 return 0;
760}
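
/*
 * Worked example (illustrative): with 4096-byte pages, associating a
 * 1024-byte region that starts 3584 bytes into a page gives
 * b_offset = 3584 and buflen = PAGE_CACHE_ALIGN(1024 + 3584) = 8192,
 * so b_page_count is 2 and the buffer spans the tail of the first page
 * plus the head of the following one.
 */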
761
762xfs_buf_t *
ce8e922c 763xfs_buf_get_noaddr(
1da177e4
LT
764 size_t len,
765 xfs_buftarg_t *target)
766{
1fa40b01
CH
767 unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
768 int error, i;
1da177e4 769 xfs_buf_t *bp;
1da177e4 770
ce8e922c 771 bp = xfs_buf_allocate(0);
1da177e4
LT
772 if (unlikely(bp == NULL))
773 goto fail;
ce8e922c 774 _xfs_buf_initialize(bp, target, 0, len, 0);
1da177e4 775
1fa40b01
CH
776 error = _xfs_buf_get_pages(bp, page_count, 0);
777 if (error)
1da177e4
LT
778 goto fail_free_buf;
779
1fa40b01
CH
780 for (i = 0; i < page_count; i++) {
781 bp->b_pages[i] = alloc_page(GFP_KERNEL);
782 if (!bp->b_pages[i])
783 goto fail_free_mem;
1da177e4 784 }
1fa40b01 785 bp->b_flags |= _XBF_PAGES;
1da177e4 786
1fa40b01
CH
787 error = _xfs_buf_map_pages(bp, XBF_MAPPED);
788 if (unlikely(error)) {
789 printk(KERN_WARNING "%s: failed to map pages\n",
34a622b2 790 __func__);
1da177e4 791 goto fail_free_mem;
1fa40b01 792 }
1da177e4 793
ce8e922c 794 xfs_buf_unlock(bp);
1da177e4 795
1fa40b01 796 XB_TRACE(bp, "no_daddr", len);
1da177e4 797 return bp;
1fa40b01 798
1da177e4 799 fail_free_mem:
1fa40b01
CH
800 while (--i >= 0)
801 __free_page(bp->b_pages[i]);
ca165b88 802 _xfs_buf_free_pages(bp);
1da177e4 803 fail_free_buf:
ca165b88 804 xfs_buf_deallocate(bp);
1da177e4
LT
805 fail:
806 return NULL;
807}
808
809/*
1da177e4
LT
810 * Increment reference count on buffer, to hold the buffer concurrently
811 * with another thread which may release (free) the buffer asynchronously.
1da177e4
LT
812 * Must hold the buffer already to call this function.
813 */
814void
ce8e922c
NS
815xfs_buf_hold(
816 xfs_buf_t *bp)
1da177e4 817{
ce8e922c
NS
818 atomic_inc(&bp->b_hold);
819 XB_TRACE(bp, "hold", 0);
1da177e4
LT
820}
821
/*
 * Releases a hold on the specified buffer.  If the
 * hold count is 1, calls xfs_buf_free.
 */
826void
ce8e922c
NS
827xfs_buf_rele(
828 xfs_buf_t *bp)
1da177e4 829{
ce8e922c 830 xfs_bufhash_t *hash = bp->b_hash;
1da177e4 831
ce8e922c 832 XB_TRACE(bp, "rele", bp->b_relse);
1da177e4 833
fad3aa1e
NS
834 if (unlikely(!hash)) {
835 ASSERT(!bp->b_relse);
836 if (atomic_dec_and_test(&bp->b_hold))
837 xfs_buf_free(bp);
838 return;
839 }
840
3790689f 841 ASSERT(atomic_read(&bp->b_hold) > 0);
ce8e922c
NS
842 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
843 if (bp->b_relse) {
844 atomic_inc(&bp->b_hold);
1da177e4 845 spin_unlock(&hash->bh_lock);
ce8e922c
NS
846 (*(bp->b_relse)) (bp);
847 } else if (bp->b_flags & XBF_FS_MANAGED) {
1da177e4 848 spin_unlock(&hash->bh_lock);
1da177e4 849 } else {
ce8e922c
NS
850 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
851 list_del_init(&bp->b_hash_list);
1da177e4 852 spin_unlock(&hash->bh_lock);
ce8e922c 853 xfs_buf_free(bp);
1da177e4
LT
854 }
855 }
856}
857
858
859/*
860 * Mutual exclusion on buffers. Locking model:
861 *
862 * Buffers associated with inodes for which buffer locking
863 * is not enabled are not protected by semaphores, and are
864 * assumed to be exclusively owned by the caller. There is a
865 * spinlock in the buffer, used by the caller when concurrent
866 * access is possible.
867 */
868
869/*
ce8e922c
NS
870 * Locks a buffer object, if it is not already locked.
871 * Note that this in no way locks the underlying pages, so it is only
872 * useful for synchronizing concurrent use of buffer objects, not for
873 * synchronizing independent access to the underlying pages.
1da177e4
LT
874 */
875int
ce8e922c
NS
876xfs_buf_cond_lock(
877 xfs_buf_t *bp)
1da177e4
LT
878{
879 int locked;
880
ce8e922c 881 locked = down_trylock(&bp->b_sema) == 0;
1da177e4 882 if (locked) {
ce8e922c 883 XB_SET_OWNER(bp);
1da177e4 884 }
ce8e922c
NS
885 XB_TRACE(bp, "cond_lock", (long)locked);
886 return locked ? 0 : -EBUSY;
1da177e4
LT
887}
888
889#if defined(DEBUG) || defined(XFS_BLI_TRACE)
1da177e4 890int
ce8e922c
NS
891xfs_buf_lock_value(
892 xfs_buf_t *bp)
1da177e4 893{
adaa693b 894 return bp->b_sema.count;
1da177e4
LT
895}
896#endif
897
898/*
ce8e922c
NS
899 * Locks a buffer object.
900 * Note that this in no way locks the underlying pages, so it is only
901 * useful for synchronizing concurrent use of buffer objects, not for
902 * synchronizing independent access to the underlying pages.
1da177e4 903 */
ce8e922c
NS
904void
905xfs_buf_lock(
906 xfs_buf_t *bp)
1da177e4 907{
ce8e922c
NS
908 XB_TRACE(bp, "lock", 0);
909 if (atomic_read(&bp->b_io_remaining))
910 blk_run_address_space(bp->b_target->bt_mapping);
911 down(&bp->b_sema);
912 XB_SET_OWNER(bp);
913 XB_TRACE(bp, "locked", 0);
1da177e4
LT
914}
915
/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly.  We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
923void
ce8e922c
NS
924xfs_buf_unlock(
925 xfs_buf_t *bp)
1da177e4 926{
ce8e922c
NS
927 if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
928 atomic_inc(&bp->b_hold);
929 bp->b_flags |= XBF_ASYNC;
930 xfs_buf_delwri_queue(bp, 0);
2f926587
DC
931 }
932
ce8e922c
NS
933 XB_CLEAR_OWNER(bp);
934 up(&bp->b_sema);
935 XB_TRACE(bp, "unlock", 0);
1da177e4
LT
936}
937
938
939/*
940 * Pinning Buffer Storage in Memory
ce8e922c 941 * Ensure that no attempt to force a buffer to disk will succeed.
1da177e4
LT
942 */
943void
ce8e922c
NS
944xfs_buf_pin(
945 xfs_buf_t *bp)
1da177e4 946{
ce8e922c
NS
947 atomic_inc(&bp->b_pin_count);
948 XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
1da177e4
LT
949}
950
1da177e4 951void
ce8e922c
NS
952xfs_buf_unpin(
953 xfs_buf_t *bp)
1da177e4 954{
ce8e922c
NS
955 if (atomic_dec_and_test(&bp->b_pin_count))
956 wake_up_all(&bp->b_waiters);
957 XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
1da177e4
LT
958}
959
960int
ce8e922c
NS
961xfs_buf_ispin(
962 xfs_buf_t *bp)
1da177e4 963{
ce8e922c 964 return atomic_read(&bp->b_pin_count);
1da177e4
LT
965}
966
ce8e922c
NS
967STATIC void
968xfs_buf_wait_unpin(
969 xfs_buf_t *bp)
1da177e4
LT
970{
971 DECLARE_WAITQUEUE (wait, current);
972
ce8e922c 973 if (atomic_read(&bp->b_pin_count) == 0)
1da177e4
LT
974 return;
975
ce8e922c 976 add_wait_queue(&bp->b_waiters, &wait);
1da177e4
LT
977 for (;;) {
978 set_current_state(TASK_UNINTERRUPTIBLE);
ce8e922c 979 if (atomic_read(&bp->b_pin_count) == 0)
1da177e4 980 break;
ce8e922c
NS
981 if (atomic_read(&bp->b_io_remaining))
982 blk_run_address_space(bp->b_target->bt_mapping);
1da177e4
LT
983 schedule();
984 }
ce8e922c 985 remove_wait_queue(&bp->b_waiters, &wait);
1da177e4
LT
986 set_current_state(TASK_RUNNING);
987}
988
989/*
990 * Buffer Utility Routines
991 */
992
1da177e4 993STATIC void
ce8e922c 994xfs_buf_iodone_work(
c4028958 995 struct work_struct *work)
1da177e4 996{
c4028958
DH
997 xfs_buf_t *bp =
998 container_of(work, xfs_buf_t, b_iodone_work);
1da177e4 999
0bfefc46
DC
	/*
	 * We can get an EOPNOTSUPP in response to ordered writes.  Here we
	 * clear the ordered flag and reissue them.  Because we can't tell
	 * the higher layers directly that they should not issue ordered
	 * I/O anymore, they need to check if the ordered flag was cleared
	 * during I/O completion.
	 */
1006 if ((bp->b_error == EOPNOTSUPP) &&
1007 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
1008 XB_TRACE(bp, "ordered_retry", bp->b_iodone);
1009 bp->b_flags &= ~XBF_ORDERED;
1010 xfs_buf_iorequest(bp);
1011 } else if (bp->b_iodone)
ce8e922c
NS
1012 (*(bp->b_iodone))(bp);
1013 else if (bp->b_flags & XBF_ASYNC)
1da177e4
LT
1014 xfs_buf_relse(bp);
1015}
1016
1017void
ce8e922c
NS
1018xfs_buf_ioend(
1019 xfs_buf_t *bp,
1da177e4
LT
1020 int schedule)
1021{
77be55a5 1022 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
ce8e922c
NS
1023 if (bp->b_error == 0)
1024 bp->b_flags |= XBF_DONE;
1da177e4 1025
ce8e922c 1026 XB_TRACE(bp, "iodone", bp->b_iodone);
1da177e4 1027
ce8e922c 1028 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1da177e4 1029 if (schedule) {
c4028958 1030 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
ce8e922c 1031 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1da177e4 1032 } else {
c4028958 1033 xfs_buf_iodone_work(&bp->b_iodone_work);
1da177e4
LT
1034 }
1035 } else {
b4dd330b 1036 complete(&bp->b_iowait);
1da177e4
LT
1037 }
1038}
1039
1da177e4 1040void
ce8e922c
NS
1041xfs_buf_ioerror(
1042 xfs_buf_t *bp,
1043 int error)
1da177e4
LT
1044{
1045 ASSERT(error >= 0 && error <= 0xffff);
ce8e922c
NS
1046 bp->b_error = (unsigned short)error;
1047 XB_TRACE(bp, "ioerror", (unsigned long)error);
1da177e4
LT
1048}
1049
1050/*
ce8e922c
NS
1051 * Initiate I/O on a buffer, based on the flags supplied.
1052 * The b_iodone routine in the buffer supplied will only be called
1da177e4 1053 * when all of the subsidiary I/O requests, if any, have been completed.
1da177e4
LT
1054 */
1055int
ce8e922c
NS
1056xfs_buf_iostart(
1057 xfs_buf_t *bp,
1058 xfs_buf_flags_t flags)
1da177e4
LT
1059{
1060 int status = 0;
1061
ce8e922c 1062 XB_TRACE(bp, "iostart", (unsigned long)flags);
1da177e4 1063
ce8e922c
NS
1064 if (flags & XBF_DELWRI) {
1065 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
1066 bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
1067 xfs_buf_delwri_queue(bp, 1);
958d4ec6 1068 return 0;
1da177e4
LT
1069 }
1070
ce8e922c
NS
1071 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
1072 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1073 bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
1074 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1da177e4 1075
ce8e922c 1076 BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
1da177e4
LT
1077
1078 /* For writes allow an alternate strategy routine to precede
1079 * the actual I/O request (which may not be issued at all in
1080 * a shutdown situation, for example).
1081 */
ce8e922c
NS
1082 status = (flags & XBF_WRITE) ?
1083 xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
1da177e4
LT
1084
1085 /* Wait for I/O if we are not an async request.
1086 * Note: async I/O request completion will release the buffer,
1087 * and that can already be done by this point. So using the
1088 * buffer pointer from here on, after async I/O, is invalid.
1089 */
ce8e922c
NS
1090 if (!status && !(flags & XBF_ASYNC))
1091 status = xfs_buf_iowait(bp);
1da177e4
LT
1092
1093 return status;
1094}
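
/*
 * Note on xfs_buf_iostart() semantics: XBF_DELWRI requests are only
 * queued for the delayed-write daemon and return 0 immediately;
 * XBF_ASYNC requests return as soon as the I/O has been submitted, and
 * the buffer may already have been released by completion; only
 * synchronous callers see the final error code via xfs_buf_iowait().
 */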
1095
7989cb8e 1096STATIC_INLINE void
ce8e922c
NS
1097_xfs_buf_ioend(
1098 xfs_buf_t *bp,
1da177e4
LT
1099 int schedule)
1100{
6ab455ee
CH
1101 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1102 bp->b_flags &= ~_XBF_PAGE_LOCKED;
ce8e922c 1103 xfs_buf_ioend(bp, schedule);
6ab455ee 1104 }
1da177e4
LT
1105}
1106
782e3b3b 1107STATIC void
ce8e922c 1108xfs_buf_bio_end_io(
1da177e4 1109 struct bio *bio,
1da177e4
LT
1110 int error)
1111{
ce8e922c
NS
1112 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1113 unsigned int blocksize = bp->b_target->bt_bsize;
eedb5530 1114 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1da177e4 1115
1da177e4 1116 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
ce8e922c 1117 bp->b_error = EIO;
1da177e4 1118
eedb5530 1119 do {
1da177e4
LT
1120 struct page *page = bvec->bv_page;
1121
948ecdb4 1122 ASSERT(!PagePrivate(page));
ce8e922c
NS
1123 if (unlikely(bp->b_error)) {
1124 if (bp->b_flags & XBF_READ)
eedb5530 1125 ClearPageUptodate(page);
ce8e922c 1126 } else if (blocksize >= PAGE_CACHE_SIZE) {
1da177e4
LT
1127 SetPageUptodate(page);
1128 } else if (!PagePrivate(page) &&
ce8e922c 1129 (bp->b_flags & _XBF_PAGE_CACHE)) {
1da177e4
LT
1130 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1131 }
1132
eedb5530
NS
1133 if (--bvec >= bio->bi_io_vec)
1134 prefetchw(&bvec->bv_page->flags);
6ab455ee
CH
1135
1136 if (bp->b_flags & _XBF_PAGE_LOCKED)
1137 unlock_page(page);
eedb5530 1138 } while (bvec >= bio->bi_io_vec);
1da177e4 1139
ce8e922c 1140 _xfs_buf_ioend(bp, 1);
1da177e4 1141 bio_put(bio);
1da177e4
LT
1142}
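
/*
 * Note on completion handling: for targets whose block size is smaller
 * than the page size, each completed bio_vec only marks its byte range
 * via set_page_region(); the page itself is not flagged uptodate until
 * every region of the page has been filled, which is how partially read
 * pages of sub-page-size metadata stay correct.
 */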
1143
1144STATIC void
ce8e922c
NS
1145_xfs_buf_ioapply(
1146 xfs_buf_t *bp)
1da177e4 1147{
a9759f2d 1148 int rw, map_i, total_nr_pages, nr_pages;
1da177e4 1149 struct bio *bio;
ce8e922c
NS
1150 int offset = bp->b_offset;
1151 int size = bp->b_count_desired;
1152 sector_t sector = bp->b_bn;
1153 unsigned int blocksize = bp->b_target->bt_bsize;
1da177e4 1154
ce8e922c 1155 total_nr_pages = bp->b_page_count;
1da177e4
LT
1156 map_i = 0;
1157
ce8e922c
NS
1158 if (bp->b_flags & XBF_ORDERED) {
1159 ASSERT(!(bp->b_flags & XBF_READ));
f538d4da 1160 rw = WRITE_BARRIER;
51bdd706
NS
1161 } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1162 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1163 bp->b_flags &= ~_XBF_RUN_QUEUES;
1164 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1165 } else {
1166 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1167 (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
f538d4da
CH
1168 }
1169
	/* Special code path for reading a sub-page-size buffer -- we
	 * populate the whole page, and hence the other metadata in the
	 * same page.  This optimization is only valid when the
	 * filesystem block size is not smaller than the page size.
	 */
ce8e922c 1175 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
6ab455ee
CH
1176 ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
1177 (XBF_READ|_XBF_PAGE_LOCKED)) &&
ce8e922c 1178 (blocksize >= PAGE_CACHE_SIZE)) {
1da177e4
LT
1179 bio = bio_alloc(GFP_NOIO, 1);
1180
ce8e922c 1181 bio->bi_bdev = bp->b_target->bt_bdev;
1da177e4 1182 bio->bi_sector = sector - (offset >> BBSHIFT);
ce8e922c
NS
1183 bio->bi_end_io = xfs_buf_bio_end_io;
1184 bio->bi_private = bp;
1da177e4 1185
ce8e922c 1186 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1da177e4
LT
1187 size = 0;
1188
ce8e922c 1189 atomic_inc(&bp->b_io_remaining);
1da177e4
LT
1190
1191 goto submit_io;
1192 }
1193
1da177e4 1194next_chunk:
ce8e922c 1195 atomic_inc(&bp->b_io_remaining);
1da177e4
LT
1196 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1197 if (nr_pages > total_nr_pages)
1198 nr_pages = total_nr_pages;
1199
1200 bio = bio_alloc(GFP_NOIO, nr_pages);
ce8e922c 1201 bio->bi_bdev = bp->b_target->bt_bdev;
1da177e4 1202 bio->bi_sector = sector;
ce8e922c
NS
1203 bio->bi_end_io = xfs_buf_bio_end_io;
1204 bio->bi_private = bp;
1da177e4
LT
1205
1206 for (; size && nr_pages; nr_pages--, map_i++) {
ce8e922c 1207 int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1da177e4
LT
1208
1209 if (nbytes > size)
1210 nbytes = size;
1211
ce8e922c
NS
1212 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1213 if (rbytes < nbytes)
1da177e4
LT
1214 break;
1215
1216 offset = 0;
1217 sector += nbytes >> BBSHIFT;
1218 size -= nbytes;
1219 total_nr_pages--;
1220 }
1221
1222submit_io:
1223 if (likely(bio->bi_size)) {
1224 submit_bio(rw, bio);
1225 if (size)
1226 goto next_chunk;
1227 } else {
1228 bio_put(bio);
ce8e922c 1229 xfs_buf_ioerror(bp, EIO);
1da177e4
LT
1230 }
1231}
1232
1da177e4 1233int
ce8e922c
NS
1234xfs_buf_iorequest(
1235 xfs_buf_t *bp)
1da177e4 1236{
ce8e922c 1237 XB_TRACE(bp, "iorequest", 0);
1da177e4 1238
ce8e922c
NS
1239 if (bp->b_flags & XBF_DELWRI) {
1240 xfs_buf_delwri_queue(bp, 1);
1da177e4
LT
1241 return 0;
1242 }
1243
ce8e922c
NS
1244 if (bp->b_flags & XBF_WRITE) {
1245 xfs_buf_wait_unpin(bp);
1da177e4
LT
1246 }
1247
ce8e922c 1248 xfs_buf_hold(bp);
1da177e4
LT
1249
1250 /* Set the count to 1 initially, this will stop an I/O
1251 * completion callout which happens before we have started
ce8e922c 1252 * all the I/O from calling xfs_buf_ioend too early.
1da177e4 1253 */
ce8e922c
NS
1254 atomic_set(&bp->b_io_remaining, 1);
1255 _xfs_buf_ioapply(bp);
1256 _xfs_buf_ioend(bp, 0);
1da177e4 1257
ce8e922c 1258 xfs_buf_rele(bp);
1da177e4
LT
1259 return 0;
1260}
1261
1262/*
ce8e922c
NS
1263 * Waits for I/O to complete on the buffer supplied.
1264 * It returns immediately if no I/O is pending.
1265 * It returns the I/O error code, if any, or 0 if there was no error.
1da177e4
LT
1266 */
1267int
ce8e922c
NS
1268xfs_buf_iowait(
1269 xfs_buf_t *bp)
1da177e4 1270{
ce8e922c
NS
1271 XB_TRACE(bp, "iowait", 0);
1272 if (atomic_read(&bp->b_io_remaining))
1273 blk_run_address_space(bp->b_target->bt_mapping);
b4dd330b 1274 wait_for_completion(&bp->b_iowait);
ce8e922c
NS
1275 XB_TRACE(bp, "iowaited", (long)bp->b_error);
1276 return bp->b_error;
1da177e4
LT
1277}
1278
ce8e922c
NS
1279xfs_caddr_t
1280xfs_buf_offset(
1281 xfs_buf_t *bp,
1da177e4
LT
1282 size_t offset)
1283{
1284 struct page *page;
1285
ce8e922c
NS
1286 if (bp->b_flags & XBF_MAPPED)
1287 return XFS_BUF_PTR(bp) + offset;
1da177e4 1288
ce8e922c
NS
1289 offset += bp->b_offset;
1290 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1291 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1da177e4
LT
1292}
1293
1294/*
1da177e4
LT
1295 * Move data into or out of a buffer.
1296 */
1297void
ce8e922c
NS
1298xfs_buf_iomove(
1299 xfs_buf_t *bp, /* buffer to process */
1da177e4
LT
1300 size_t boff, /* starting buffer offset */
1301 size_t bsize, /* length to copy */
1302 caddr_t data, /* data address */
ce8e922c 1303 xfs_buf_rw_t mode) /* read/write/zero flag */
1da177e4
LT
1304{
1305 size_t bend, cpoff, csize;
1306 struct page *page;
1307
1308 bend = boff + bsize;
1309 while (boff < bend) {
ce8e922c
NS
1310 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1311 cpoff = xfs_buf_poff(boff + bp->b_offset);
1da177e4 1312 csize = min_t(size_t,
ce8e922c 1313 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1da177e4
LT
1314
1315 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1316
1317 switch (mode) {
ce8e922c 1318 case XBRW_ZERO:
1da177e4
LT
1319 memset(page_address(page) + cpoff, 0, csize);
1320 break;
ce8e922c 1321 case XBRW_READ:
1da177e4
LT
1322 memcpy(data, page_address(page) + cpoff, csize);
1323 break;
ce8e922c 1324 case XBRW_WRITE:
1da177e4
LT
1325 memcpy(page_address(page) + cpoff, data, csize);
1326 }
1327
1328 boff += csize;
1329 data += csize;
1330 }
1331}
1332
1333/*
ce8e922c 1334 * Handling of buffer targets (buftargs).
1da177e4
LT
1335 */
1336
1337/*
ce8e922c
NS
1338 * Wait for any bufs with callbacks that have been submitted but
1339 * have not yet returned... walk the hash list for the target.
1da177e4
LT
1340 */
1341void
1342xfs_wait_buftarg(
1343 xfs_buftarg_t *btp)
1344{
1345 xfs_buf_t *bp, *n;
1346 xfs_bufhash_t *hash;
1347 uint i;
1348
1349 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1350 hash = &btp->bt_hash[i];
1351again:
1352 spin_lock(&hash->bh_lock);
ce8e922c
NS
1353 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1354 ASSERT(btp == bp->b_target);
1355 if (!(bp->b_flags & XBF_FS_MANAGED)) {
1da177e4 1356 spin_unlock(&hash->bh_lock);
2f926587
DC
1357 /*
1358 * Catch superblock reference count leaks
1359 * immediately
1360 */
ce8e922c 1361 BUG_ON(bp->b_bn == 0);
1da177e4
LT
1362 delay(100);
1363 goto again;
1364 }
1365 }
1366 spin_unlock(&hash->bh_lock);
1367 }
1368}
1369
1370/*
ce8e922c
NS
1371 * Allocate buffer hash table for a given target.
1372 * For devices containing metadata (i.e. not the log/realtime devices)
1373 * we need to allocate a much larger hash table.
1da177e4
LT
1374 */
1375STATIC void
1376xfs_alloc_bufhash(
1377 xfs_buftarg_t *btp,
1378 int external)
1379{
1380 unsigned int i;
1381
1382 btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
1383 btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1384 btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
93c189c1 1385 sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
1da177e4
LT
1386 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1387 spin_lock_init(&btp->bt_hash[i].bh_lock);
1388 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1389 }
1390}
1391
1392STATIC void
1393xfs_free_bufhash(
1394 xfs_buftarg_t *btp)
1395{
f0e2d93c 1396 kmem_free(btp->bt_hash);
1da177e4
LT
1397 btp->bt_hash = NULL;
1398}
1399
a6867a68 1400/*
ce8e922c 1401 * buftarg list for delwrite queue processing
a6867a68 1402 */
e6a0e9cd 1403static LIST_HEAD(xfs_buftarg_list);
7989cb8e 1404static DEFINE_SPINLOCK(xfs_buftarg_lock);
a6867a68
DC
1405
1406STATIC void
1407xfs_register_buftarg(
1408 xfs_buftarg_t *btp)
1409{
1410 spin_lock(&xfs_buftarg_lock);
1411 list_add(&btp->bt_list, &xfs_buftarg_list);
1412 spin_unlock(&xfs_buftarg_lock);
1413}
1414
1415STATIC void
1416xfs_unregister_buftarg(
1417 xfs_buftarg_t *btp)
1418{
1419 spin_lock(&xfs_buftarg_lock);
1420 list_del(&btp->bt_list);
1421 spin_unlock(&xfs_buftarg_lock);
1422}
1423
1da177e4
LT
1424void
1425xfs_free_buftarg(
19f354d4 1426 xfs_buftarg_t *btp)
1da177e4
LT
1427{
1428 xfs_flush_buftarg(btp, 1);
f4a9f28a 1429 xfs_blkdev_issue_flush(btp);
1da177e4 1430 xfs_free_bufhash(btp);
ce8e922c 1431 iput(btp->bt_mapping->host);
a6867a68 1432
ce8e922c
NS
1433 /* Unregister the buftarg first so that we don't get a
1434 * wakeup finding a non-existent task
1435 */
a6867a68
DC
1436 xfs_unregister_buftarg(btp);
1437 kthread_stop(btp->bt_task);
1438
f0e2d93c 1439 kmem_free(btp);
1da177e4
LT
1440}
1441
1da177e4
LT
1442STATIC int
1443xfs_setsize_buftarg_flags(
1444 xfs_buftarg_t *btp,
1445 unsigned int blocksize,
1446 unsigned int sectorsize,
1447 int verbose)
1448{
ce8e922c
NS
1449 btp->bt_bsize = blocksize;
1450 btp->bt_sshift = ffs(sectorsize) - 1;
1451 btp->bt_smask = sectorsize - 1;
1da177e4 1452
ce8e922c 1453 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1da177e4
LT
1454 printk(KERN_WARNING
1455 "XFS: Cannot set_blocksize to %u on device %s\n",
1456 sectorsize, XFS_BUFTARG_NAME(btp));
1457 return EINVAL;
1458 }
1459
1460 if (verbose &&
1461 (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1462 printk(KERN_WARNING
1463 "XFS: %u byte sectors in use on device %s. "
1464 "This is suboptimal; %u or greater is ideal.\n",
1465 sectorsize, XFS_BUFTARG_NAME(btp),
1466 (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1467 }
1468
1469 return 0;
1470}
1471
/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so we don't know what size sectors
 * are being used at this early stage.  Play safe.
 */
1da177e4
LT
1477STATIC int
1478xfs_setsize_buftarg_early(
1479 xfs_buftarg_t *btp,
1480 struct block_device *bdev)
1481{
1482 return xfs_setsize_buftarg_flags(btp,
1483 PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1484}
1485
1486int
1487xfs_setsize_buftarg(
1488 xfs_buftarg_t *btp,
1489 unsigned int blocksize,
1490 unsigned int sectorsize)
1491{
1492 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1493}
1494
1495STATIC int
1496xfs_mapping_buftarg(
1497 xfs_buftarg_t *btp,
1498 struct block_device *bdev)
1499{
1500 struct backing_dev_info *bdi;
1501 struct inode *inode;
1502 struct address_space *mapping;
f5e54d6e 1503 static const struct address_space_operations mapping_aops = {
1da177e4 1504 .sync_page = block_sync_page,
e965f963 1505 .migratepage = fail_migrate_page,
1da177e4
LT
1506 };
1507
1508 inode = new_inode(bdev->bd_inode->i_sb);
1509 if (!inode) {
1510 printk(KERN_WARNING
1511 "XFS: Cannot allocate mapping inode for device %s\n",
1512 XFS_BUFTARG_NAME(btp));
1513 return ENOMEM;
1514 }
1515 inode->i_mode = S_IFBLK;
1516 inode->i_bdev = bdev;
1517 inode->i_rdev = bdev->bd_dev;
1518 bdi = blk_get_backing_dev_info(bdev);
1519 if (!bdi)
1520 bdi = &default_backing_dev_info;
1521 mapping = &inode->i_data;
1522 mapping->a_ops = &mapping_aops;
1523 mapping->backing_dev_info = bdi;
1524 mapping_set_gfp_mask(mapping, GFP_NOFS);
ce8e922c 1525 btp->bt_mapping = mapping;
1da177e4
LT
1526 return 0;
1527}
1528
a6867a68
DC
1529STATIC int
1530xfs_alloc_delwrite_queue(
1531 xfs_buftarg_t *btp)
1532{
1533 int error = 0;
1534
1535 INIT_LIST_HEAD(&btp->bt_list);
1536 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
007c61c6 1537 spin_lock_init(&btp->bt_delwrite_lock);
a6867a68
DC
1538 btp->bt_flags = 0;
1539 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1540 if (IS_ERR(btp->bt_task)) {
1541 error = PTR_ERR(btp->bt_task);
1542 goto out_error;
1543 }
1544 xfs_register_buftarg(btp);
1545out_error:
1546 return error;
1547}
1548
1da177e4
LT
1549xfs_buftarg_t *
1550xfs_alloc_buftarg(
1551 struct block_device *bdev,
1552 int external)
1553{
1554 xfs_buftarg_t *btp;
1555
1556 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1557
ce8e922c
NS
1558 btp->bt_dev = bdev->bd_dev;
1559 btp->bt_bdev = bdev;
1da177e4
LT
1560 if (xfs_setsize_buftarg_early(btp, bdev))
1561 goto error;
1562 if (xfs_mapping_buftarg(btp, bdev))
1563 goto error;
a6867a68
DC
1564 if (xfs_alloc_delwrite_queue(btp))
1565 goto error;
1da177e4
LT
1566 xfs_alloc_bufhash(btp, external);
1567 return btp;
1568
1569error:
f0e2d93c 1570 kmem_free(btp);
1da177e4
LT
1571 return NULL;
1572}
1573
1574
1575/*
ce8e922c 1576 * Delayed write buffer handling
1da177e4 1577 */
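
/*
 * A buffer flagged XBF_DELWRI is placed on its target's
 * bt_delwrite_queue rather than being written immediately.  The
 * per-target xfsbufd thread wakes every xfs_buf_timer_centisecs and
 * pushes out buffers that have been queued for longer than
 * xfs_buf_age_centisecs, unless XBT_FORCE_FLUSH has been set (by
 * xfs_flush_buftarg() or by memory pressure via xfsbufd_wakeup()), in
 * which case age is ignored.
 */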
1da177e4 1578STATIC void
ce8e922c
NS
1579xfs_buf_delwri_queue(
1580 xfs_buf_t *bp,
1da177e4
LT
1581 int unlock)
1582{
ce8e922c
NS
1583 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1584 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
a6867a68 1585
ce8e922c
NS
1586 XB_TRACE(bp, "delwri_q", (long)unlock);
1587 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1da177e4 1588
a6867a68 1589 spin_lock(dwlk);
1da177e4 1590 /* If already in the queue, dequeue and place at tail */
ce8e922c
NS
1591 if (!list_empty(&bp->b_list)) {
1592 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1593 if (unlock)
1594 atomic_dec(&bp->b_hold);
1595 list_del(&bp->b_list);
1da177e4
LT
1596 }
1597
ce8e922c
NS
1598 bp->b_flags |= _XBF_DELWRI_Q;
1599 list_add_tail(&bp->b_list, dwq);
1600 bp->b_queuetime = jiffies;
a6867a68 1601 spin_unlock(dwlk);
1da177e4
LT
1602
1603 if (unlock)
ce8e922c 1604 xfs_buf_unlock(bp);
1da177e4
LT
1605}
1606
1607void
ce8e922c
NS
1608xfs_buf_delwri_dequeue(
1609 xfs_buf_t *bp)
1da177e4 1610{
ce8e922c 1611 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1da177e4
LT
1612 int dequeued = 0;
1613
a6867a68 1614 spin_lock(dwlk);
ce8e922c
NS
1615 if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1616 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1617 list_del_init(&bp->b_list);
1da177e4
LT
1618 dequeued = 1;
1619 }
ce8e922c 1620 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
a6867a68 1621 spin_unlock(dwlk);
1da177e4
LT
1622
1623 if (dequeued)
ce8e922c 1624 xfs_buf_rele(bp);
1da177e4 1625
ce8e922c 1626 XB_TRACE(bp, "delwri_dq", (long)dequeued);
1da177e4
LT
1627}
1628
1629STATIC void
ce8e922c 1630xfs_buf_runall_queues(
1da177e4
LT
1631 struct workqueue_struct *queue)
1632{
1633 flush_workqueue(queue);
1634}
1635
1da177e4 1636STATIC int
23ea4032 1637xfsbufd_wakeup(
15c84a47
NS
1638 int priority,
1639 gfp_t mask)
1da177e4 1640{
da7f93e9 1641 xfs_buftarg_t *btp;
a6867a68
DC
1642
1643 spin_lock(&xfs_buftarg_lock);
da7f93e9 1644 list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
ce8e922c 1645 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
a6867a68 1646 continue;
ce8e922c 1647 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
a6867a68
DC
1648 wake_up_process(btp->bt_task);
1649 }
1650 spin_unlock(&xfs_buftarg_lock);
1da177e4
LT
1651 return 0;
1652}
1653
585e6d88
DC
/*
 * Move as many buffers as specified to the supplied list,
 * indicating if we skipped any buffers to prevent deadlocks.
 */
1658STATIC int
1659xfs_buf_delwri_split(
1660 xfs_buftarg_t *target,
1661 struct list_head *list,
5e6a07df 1662 unsigned long age)
585e6d88
DC
1663{
1664 xfs_buf_t *bp, *n;
1665 struct list_head *dwq = &target->bt_delwrite_queue;
1666 spinlock_t *dwlk = &target->bt_delwrite_lock;
1667 int skipped = 0;
5e6a07df 1668 int force;
585e6d88 1669
5e6a07df 1670 force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
585e6d88
DC
1671 INIT_LIST_HEAD(list);
1672 spin_lock(dwlk);
1673 list_for_each_entry_safe(bp, n, dwq, b_list) {
1674 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1675 ASSERT(bp->b_flags & XBF_DELWRI);
1676
1677 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
5e6a07df 1678 if (!force &&
585e6d88
DC
1679 time_before(jiffies, bp->b_queuetime + age)) {
1680 xfs_buf_unlock(bp);
1681 break;
1682 }
1683
1684 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1685 _XBF_RUN_QUEUES);
1686 bp->b_flags |= XBF_WRITE;
1687 list_move_tail(&bp->b_list, list);
1688 } else
1689 skipped++;
1690 }
1691 spin_unlock(dwlk);
1692
1693 return skipped;
1694
1695}
1696
1da177e4 1697STATIC int
23ea4032 1698xfsbufd(
585e6d88 1699 void *data)
1da177e4 1700{
585e6d88
DC
1701 struct list_head tmp;
1702 xfs_buftarg_t *target = (xfs_buftarg_t *)data;
1703 int count;
1704 xfs_buf_t *bp;
1da177e4 1705
1da177e4
LT
1706 current->flags |= PF_MEMALLOC;
1707
978c7b2f
RW
1708 set_freezable();
1709
1da177e4 1710 do {
3e1d1d28 1711 if (unlikely(freezing(current))) {
ce8e922c 1712 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
3e1d1d28 1713 refrigerator();
abd0cf7a 1714 } else {
ce8e922c 1715 clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
abd0cf7a 1716 }
1da177e4 1717
15c84a47
NS
1718 schedule_timeout_interruptible(
1719 xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1da177e4 1720
585e6d88 1721 xfs_buf_delwri_split(target, &tmp,
5e6a07df 1722 xfs_buf_age_centisecs * msecs_to_jiffies(10));
1da177e4 1723
585e6d88 1724 count = 0;
1da177e4 1725 while (!list_empty(&tmp)) {
ce8e922c
NS
1726 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1727 ASSERT(target == bp->b_target);
1da177e4 1728
ce8e922c
NS
1729 list_del_init(&bp->b_list);
1730 xfs_buf_iostrategy(bp);
585e6d88 1731 count++;
1da177e4
LT
1732 }
1733
1734 if (as_list_len > 0)
1735 purge_addresses();
f07c2250
NS
1736 if (count)
1737 blk_run_address_space(target->bt_mapping);
1da177e4 1738
4df08c52 1739 } while (!kthread_should_stop());
1da177e4 1740
4df08c52 1741 return 0;
1da177e4
LT
1742}
1743
1744/*
ce8e922c
NS
1745 * Go through all incore buffers, and release buffers if they belong to
1746 * the given device. This is used in filesystem error handling to
1747 * preserve the consistency of its metadata.
1da177e4
LT
1748 */
1749int
1750xfs_flush_buftarg(
585e6d88
DC
1751 xfs_buftarg_t *target,
1752 int wait)
1da177e4 1753{
585e6d88
DC
1754 struct list_head tmp;
1755 xfs_buf_t *bp, *n;
1756 int pincount = 0;
1da177e4 1757
ce8e922c
NS
1758 xfs_buf_runall_queues(xfsdatad_workqueue);
1759 xfs_buf_runall_queues(xfslogd_workqueue);
1da177e4 1760
5e6a07df
DC
1761 set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1762 pincount = xfs_buf_delwri_split(target, &tmp, 0);
1da177e4
LT
1763
1764 /*
1765 * Dropped the delayed write list lock, now walk the temporary list
1766 */
ce8e922c 1767 list_for_each_entry_safe(bp, n, &tmp, b_list) {
585e6d88 1768 ASSERT(target == bp->b_target);
1da177e4 1769 if (wait)
ce8e922c 1770 bp->b_flags &= ~XBF_ASYNC;
1da177e4 1771 else
ce8e922c 1772 list_del_init(&bp->b_list);
1da177e4 1773
ce8e922c 1774 xfs_buf_iostrategy(bp);
1da177e4
LT
1775 }
1776
f07c2250
NS
1777 if (wait)
1778 blk_run_address_space(target->bt_mapping);
1779
1da177e4
LT
1780 /*
1781 * Remaining list items must be flushed before returning
1782 */
1783 while (!list_empty(&tmp)) {
ce8e922c 1784 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1da177e4 1785
ce8e922c
NS
1786 list_del_init(&bp->b_list);
1787 xfs_iowait(bp);
1788 xfs_buf_relse(bp);
1da177e4
LT
1789 }
1790
1da177e4
LT
1791 return pincount;
1792}
1793
04d8b284 1794int __init
ce8e922c 1795xfs_buf_init(void)
1da177e4 1796{
ce8e922c 1797#ifdef XFS_BUF_TRACE
5695ef46 1798 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
04d8b284
CH
1799#endif
1800
8758280f
NS
1801 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1802 KM_ZONE_HWALIGN, NULL);
ce8e922c 1803 if (!xfs_buf_zone)
04d8b284
CH
1804 goto out_free_trace_buf;
1805
b4337692 1806 xfslogd_workqueue = create_workqueue("xfslogd");
23ea4032 1807 if (!xfslogd_workqueue)
04d8b284 1808 goto out_free_buf_zone;
1da177e4 1809
b4337692 1810 xfsdatad_workqueue = create_workqueue("xfsdatad");
23ea4032
CH
1811 if (!xfsdatad_workqueue)
1812 goto out_destroy_xfslogd_workqueue;
1da177e4 1813
8e1f936b 1814 register_shrinker(&xfs_buf_shake);
23ea4032 1815 return 0;
1da177e4 1816
23ea4032
CH
1817 out_destroy_xfslogd_workqueue:
1818 destroy_workqueue(xfslogd_workqueue);
23ea4032 1819 out_free_buf_zone:
ce8e922c 1820 kmem_zone_destroy(xfs_buf_zone);
04d8b284 1821 out_free_trace_buf:
ce8e922c
NS
1822#ifdef XFS_BUF_TRACE
1823 ktrace_free(xfs_buf_trace_buf);
23ea4032 1824#endif
8758280f 1825 return -ENOMEM;
1da177e4
LT
1826}
1827
1da177e4 1828void
ce8e922c 1829xfs_buf_terminate(void)
1da177e4 1830{
8e1f936b 1831 unregister_shrinker(&xfs_buf_shake);
04d8b284
CH
1832 destroy_workqueue(xfsdatad_workqueue);
1833 destroy_workqueue(xfslogd_workqueue);
ce8e922c
NS
1834 kmem_zone_destroy(xfs_buf_zone);
1835#ifdef XFS_BUF_TRACE
1836 ktrace_free(xfs_buf_trace_buf);
1da177e4 1837#endif
1da177e4 1838}
e6a0e9cd
TS
1839
1840#ifdef CONFIG_KDB_MODULES
1841struct list_head *
1842xfs_get_buftarg_list(void)
1843{
1844 return &xfs_buftarg_list;
1845}
1846#endif