nilfs2: always set back pointer to host inode in mapping->host
fs/nilfs2/page.c

/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>,
 *            Seiji Kihara <kihara@osrg.net>.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"

#define NILFS_BUFFER_INHERENT_BITS  \
        ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
         (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))

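/*
 * __nilfs_get_page_block() ensures that @page has buffer heads attached
 * (creating empty buffers of the inode's block size if necessary) and
 * returns the buffer head covering @block.  The buffer is touched and
 * waited on, so the caller receives it with any pending I/O completed.
 */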
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
                       int blkbits, unsigned long b_state)
{
        unsigned long first_block;
        struct buffer_head *bh;

        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << blkbits, b_state);

        first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
        bh = nilfs_page_get_nth_block(page, block - first_block);

        touch_buffer(bh);
        wait_on_buffer(bh);
        return bh;
}

/*
 * Since the page cache of B-tree node pages or data page cache of pseudo
 * inodes does not have a valid mapping->host pointer, calling
 * mark_buffer_dirty() for their buffers causes a NULL pointer dereference;
 * it calls __mark_inode_dirty(NULL) through __set_page_dirty().
 * To avoid this problem, the old style mark_buffer_dirty() is used instead.
 */
void nilfs_mark_buffer_dirty(struct buffer_head *bh)
{
        if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
                __set_page_dirty_nobuffers(bh->b_page);
}

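/*
 * nilfs_grab_buffer() looks up (or creates) the page cache page that
 * contains block @blkoff of @mapping and returns the matching buffer head,
 * with @b_state pre-set on newly created buffers.  On success the page is
 * left locked and referenced; the caller is expected to release the buffer
 * and the page when done, roughly like the illustrative (not verbatim)
 * sequence below:
 *
 *      bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *      if (bh) {
 *              ... use the buffer ...
 *              unlock_page(bh->b_page);
 *              page_cache_release(bh->b_page);
 *              brelse(bh);
 *      }
 */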
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
                                      struct address_space *mapping,
                                      unsigned long blkoff,
                                      unsigned long b_state)
{
        int blkbits = inode->i_blkbits;
        pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
        struct page *page;
        struct buffer_head *bh;

        page = grab_cache_page(mapping, index);
        if (unlikely(!page))
                return NULL;

        bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
        if (unlikely(!bh)) {
                unlock_page(page);
                page_cache_release(page);
                return NULL;
        }
        return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
        struct page *page = bh->b_page;

        lock_buffer(bh);
        clear_buffer_nilfs_volatile(bh);
        clear_buffer_nilfs_checked(bh);
        clear_buffer_nilfs_redirected(bh);
        clear_buffer_dirty(bh);
        if (nilfs_page_buffers_clean(page))
                __nilfs_clear_page_dirty(page);

        clear_buffer_uptodate(bh);
        clear_buffer_mapped(bh);
        bh->b_blocknr = -1;
        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        unlock_buffer(bh);
        brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
        void *kaddr0, *kaddr1;
        unsigned long bits;
        struct page *spage = sbh->b_page, *dpage = dbh->b_page;
        struct buffer_head *bh;

        kaddr0 = kmap_atomic(spage, KM_USER0);
        kaddr1 = kmap_atomic(dpage, KM_USER1);
        memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
        kunmap_atomic(kaddr1, KM_USER1);
        kunmap_atomic(kaddr0, KM_USER0);

        dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
        dbh->b_blocknr = sbh->b_blocknr;
        dbh->b_bdev = sbh->b_bdev;

        bh = dbh;
        bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
        while ((bh = bh->b_this_page) != dbh) {
                lock_buffer(bh);
                bits &= bh->b_state;
                unlock_buffer(bh);
        }
        if (bits & (1UL << BH_Uptodate))
                SetPageUptodate(dpage);
        else
                ClearPageUptodate(dpage);
        if (bits & (1UL << BH_Mapped))
                SetPageMappedToDisk(dpage);
        else
                ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check whether a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
        struct buffer_head *bh, *head;

        bh = head = page_buffers(page);
        do {
                if (buffer_dirty(bh))
                        return 0;
                bh = bh->b_this_page;
        } while (bh != head);
        return 1;
}

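/*
 * nilfs_page_bug() is a debugging aid: it dumps the state of @page
 * (reference count, index, flags, owner mapping and inode number) and of
 * every buffer head attached to it to the kernel log at KERN_CRIT level.
 * It is used in this file via the NILFS_PAGE_BUG() macro when an
 * inconsistent page is detected.
 */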
void nilfs_page_bug(struct page *page)
{
        struct address_space *m;
        unsigned long ino;

        if (unlikely(!page)) {
                printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
                return;
        }

        m = page->mapping;
        ino = m ? m->host->i_ino : 0;

        printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
               "mapping=%p ino=%lu\n",
               page, atomic_read(&page->_count),
               (unsigned long long)page->index, page->flags, m, ino);

        if (page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                int i = 0;

                bh = head = page_buffers(page);
                do {
                        printk(KERN_CRIT
                               " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
                               i++, bh, atomic_read(&bh->b_count),
                               (unsigned long long)bh->b_blocknr, bh->b_state);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  Handling of the
 * dirty flag is left to the caller.  The page must not be under I/O.
 * Both src and dst pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
        struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
        unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

        BUG_ON(PageWriteback(dst));

        sbh = sbufs = page_buffers(src);
        if (!page_has_buffers(dst))
                create_empty_buffers(dst, sbh->b_size, 0);

        if (copy_dirty)
                mask |= (1UL << BH_Dirty);

        dbh = dbufs = page_buffers(dst);
        do {
                lock_buffer(sbh);
                lock_buffer(dbh);
                dbh->b_state = sbh->b_state & mask;
                dbh->b_blocknr = sbh->b_blocknr;
                dbh->b_bdev = sbh->b_bdev;
                sbh = sbh->b_this_page;
                dbh = dbh->b_this_page;
        } while (dbh != dbufs);

        copy_highpage(dst, src);

        if (PageUptodate(src) && !PageUptodate(dst))
                SetPageUptodate(dst);
        else if (!PageUptodate(src) && PageUptodate(dst))
                ClearPageUptodate(dst);
        if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
                SetPageMappedToDisk(dst);
        else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
                ClearPageMappedToDisk(dst);

        do {
                unlock_buffer(sbh);
                unlock_buffer(dbh);
                sbh = sbh->b_this_page;
                dbh = dbh->b_this_page;
        } while (dbh != dbufs);
}

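/*
 * nilfs_copy_dirty_pages() duplicates every page tagged dirty in the source
 * cache @smap into the destination cache @dmap, copying buffer state with
 * nilfs_copy_page() and tagging each copy dirty.  It returns 0 on success
 * or -ENOMEM if a destination page could not be allocated.
 */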
int nilfs_copy_dirty_pages(struct address_space *dmap,
                           struct address_space *smap)
{
        struct pagevec pvec;
        unsigned int i;
        pgoff_t index = 0;
        int err = 0;

        pagevec_init(&pvec, 0);
repeat:
        if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
                                PAGEVEC_SIZE))
                return 0;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct page *page = pvec.pages[i], *dpage;

                lock_page(page);
                if (unlikely(!PageDirty(page)))
                        NILFS_PAGE_BUG(page, "inconsistent dirty state");

                dpage = grab_cache_page(dmap, page->index);
                if (unlikely(!dpage)) {
                        /* No empty page is added to the page cache */
                        err = -ENOMEM;
                        unlock_page(page);
                        break;
                }
                if (unlikely(!page_has_buffers(page)))
                        NILFS_PAGE_BUG(page,
                                       "found empty page in dat page cache");

                nilfs_copy_page(dpage, page, 1);
                __set_page_dirty_nobuffers(dpage);

                unlock_page(dpage);
                page_cache_release(dpage);
                unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();

        if (likely(!err))
                goto repeat;
        return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process;
 * this must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
                           struct address_space *smap)
{
        struct pagevec pvec;
        unsigned int i, n;
        pgoff_t index = 0;
        int err;

        pagevec_init(&pvec, 0);
repeat:
        n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
        if (!n)
                return;
        index = pvec.pages[n - 1]->index + 1;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct page *page = pvec.pages[i], *dpage;
                pgoff_t offset = page->index;

                lock_page(page);
                dpage = find_lock_page(dmap, offset);
                if (dpage) {
                        /* override existing page on the destination cache */
                        WARN_ON(PageDirty(dpage));
                        nilfs_copy_page(dpage, page, 0);
                        unlock_page(dpage);
                        page_cache_release(dpage);
                } else {
                        struct page *page2;

                        /* move the page to the destination cache */
                        spin_lock_irq(&smap->tree_lock);
                        page2 = radix_tree_delete(&smap->page_tree, offset);
                        WARN_ON(page2 != page);

                        smap->nrpages--;
                        spin_unlock_irq(&smap->tree_lock);

                        spin_lock_irq(&dmap->tree_lock);
                        err = radix_tree_insert(&dmap->page_tree, offset, page);
                        if (unlikely(err < 0)) {
                                WARN_ON(err == -EEXIST);
                                page->mapping = NULL;
                                page_cache_release(page); /* for cache */
                        } else {
                                page->mapping = dmap;
                                dmap->nrpages++;
                                if (PageDirty(page))
                                        radix_tree_tag_set(&dmap->page_tree,
                                                           offset,
                                                           PAGECACHE_TAG_DIRTY);
                        }
                        spin_unlock_irq(&dmap->tree_lock);
                }
                unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();

        goto repeat;
}

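/*
 * nilfs_clear_dirty_pages() walks all pages tagged dirty in @mapping and
 * cancels their dirty state: page and buffer dirty flags are cleared
 * together with the uptodate/mapped bits and the NILFS-specific
 * volatile/checked/redirected buffer flags.
 */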
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
        struct pagevec pvec;
        unsigned int i;
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);

        while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
                                  PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        lock_page(page);
                        ClearPageUptodate(page);
                        ClearPageMappedToDisk(page);
                        bh = head = page_buffers(page);
                        do {
                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                clear_buffer_nilfs_volatile(bh);
                                clear_buffer_nilfs_checked(bh);
                                clear_buffer_nilfs_redirected(bh);
                                clear_buffer_uptodate(bh);
                                clear_buffer_mapped(bh);
                                unlock_buffer(bh);
                                bh = bh->b_this_page;
                        } while (bh != head);

                        __nilfs_clear_page_dirty(page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

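/*
 * nilfs_page_count_clean_buffers() returns the number of buffers on @page
 * that overlap the byte range [@from, @to) and are not dirty.
 */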
unsigned nilfs_page_count_clean_buffers(struct page *page,
                                        unsigned from, unsigned to)
{
        unsigned block_start, block_end;
        struct buffer_head *bh, *head;
        unsigned nc = 0;

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + bh->b_size;
                if (block_end > from && block_start < to && !buffer_dirty(bh))
                        nc++;
        }
        return nc;
}

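/*
 * nilfs_mapping_init() sets up an address_space that NILFS manages
 * internally, such as B-tree node caches and shadow page caches.  Note
 * that mapping->host is always pointed back at the owning inode here,
 * which keeps code that dereferences mapping->host from tripping over a
 * NULL host pointer; this is the behavior that the change titled
 * "nilfs2: always set back pointer to host inode in mapping->host"
 * establishes.
 */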
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
                        struct backing_dev_info *bdi)
{
        mapping->host = inode;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        mapping->assoc_mapping = NULL;
        mapping->backing_dev_info = bdi;
        mapping->a_ops = &empty_aops;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (mapping) {
                spin_lock_irq(&mapping->tree_lock);
                if (test_bit(PG_dirty, &page->flags)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                             page_index(page),
                                             PAGECACHE_TAG_DIRTY);
                        spin_unlock_irq(&mapping->tree_lock);
                        return clear_page_dirty_for_io(page);
                }
                spin_unlock_irq(&mapping->tree_lock);
                return 0;
        }
        return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed" which
 * starts at a block offset equal to or larger than @start_blk.  If such
 * an extent is found, its start offset is stored in @blkoff and its
 * length in blocks is returned.  Otherwise, zero is returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
                                            sector_t start_blk,
                                            sector_t *blkoff)
{
        unsigned int i;
        pgoff_t index;
        unsigned int nblocks_in_page;
        unsigned long length = 0;
        sector_t b;
        struct pagevec pvec;
        struct page *page;

        if (inode->i_mapping->nrpages == 0)
                return 0;

        index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
        nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);

        pagevec_init(&pvec, 0);

repeat:
        pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
                                        pvec.pages);
        if (pvec.nr == 0)
                return length;

        if (length > 0 && pvec.pages[0]->index > index)
                goto out;

        b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        i = 0;
        do {
                page = pvec.pages[i];

                lock_page(page);
                if (page_has_buffers(page)) {
                        struct buffer_head *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (b < start_blk)
                                        continue;
                                if (buffer_delay(bh)) {
                                        if (length == 0)
                                                *blkoff = b;
                                        length++;
                                } else if (length > 0) {
                                        goto out_locked;
                                }
                        } while (++b, bh = bh->b_this_page, bh != head);
                } else {
                        if (length > 0)
                                goto out_locked;

                        b += nblocks_in_page;
                }
                unlock_page(page);

        } while (++i < pagevec_count(&pvec));

        index = page->index + 1;
        pagevec_release(&pvec);
        cond_resched();
        goto repeat;

out_locked:
        unlock_page(page);
out:
        pagevec_release(&pvec);
        return length;
}