fs/jfs/jfs_metapage.c
/*
 * Copyright (C) International Business Machines Corp., 2000-2005
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

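/*
 * Metapage locking: META_locked in mp->flag is the lock bit and mp->wait
 * is the queue that contended lockers sleep on.  The slow path below
 * temporarily drops the page lock while sleeping, so the current holder
 * (which also needs the page lock) can make progress, then re-takes it
 * before retrying.  Rough usage sketch (hypothetical caller, for
 * illustration only):
 *
 *	lock_page(mp->page);
 *	lock_metapage(mp);
 *	... modify metapage state ...
 *	unlock_metapage(mp);
 *	unlock_page(mp->page);
 */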
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

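/*
 * When the metapage size (PSIZE) is smaller than the machine page size,
 * several metapages share one page cache page.  The page's private
 * pointer then holds a meta_anchor: an array of up to MPS_PER_PAGE
 * metapage pointers indexed by offset within the page, a count of live
 * metapages, and an io_count that tracks in-flight bios so the page is
 * only unlocked / writeback-ended once the last I/O on it completes
 * (see inc_io()/dec_io()).
 */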
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

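/*
 * Metapage structures come from a dedicated slab cache fronted by a
 * mempool, so at least METAPOOL_MIN_PAGES allocations can succeed even
 * under memory pressure.  alloc_metapage() reinitializes the fields
 * that must not be inherited from a previous user of the object.
 */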
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

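/*
 * Translate a logical block of a metadata inode into an on-disk block
 * number.  *len is clamped to the size of the file and, on return,
 * reflects the length of the contiguous extent found.  For regular
 * metadata inodes the mapping goes through xtLookup(); the block
 * device's "direct" inode (i_ino == 0) has no xtree, so its blocks map
 * 1:1 and lblock is returned unchanged.  A return value of 0 means no
 * mapping exists.
 */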
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_error) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_error) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}

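/*
 * Write back the dirty metapages in one page.  The loop walks the page
 * in PSIZE steps and batches metapages that are contiguous both in the
 * page and on disk into a single bio; whenever contiguity breaks, the
 * pending bio is submitted and a new one is started.  Metapages pinned
 * by the transaction manager (nohomeok) are skipped and the page is
 * redirtied instead; if the journal is idle, it is kicked so the pin
 * eventually goes away.  io_count is raised before each submit_bio()
 * so last_write_complete() only runs once the final bio finishes.
 */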
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(WRITE, bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(WRITE, bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}

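/*
 * Read a metadata page.  Each contiguous on-disk extent within the page
 * gets its own bio; when an extent ends, the pending bio is submitted
 * before the next one is built.  Blocks with no mapping are simply
 * skipped one block at a time.  If nothing was submitted at all, the
 * page is unlocked here; otherwise last_read_complete() unlocks it when
 * the final bio ends.
 */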
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(READ, bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio->bi_bdev = inode->i_sb->s_bdev;
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(READ, bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

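/*
 * ->releasepage: the VM asks whether the page's private data can be
 * dropped.  Any metapage that is still referenced (count), pinned by a
 * transaction (nohomeok), or dirty makes the page non-releasable;
 * everything else is detached from the logsync list and freed.
 * ->invalidatepage below reuses this after asserting that the whole
 * page is going away and no writeback is in flight.
 */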
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned int offset,
				    unsigned int length)
{
	BUG_ON(offset || length < PAGE_SIZE);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

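/*
 * Look up (or create) the metapage covering lblock.  With 'absolute'
 * set, the block number is relative to the block device and the page
 * comes from the direct inode's mapping; otherwise it is a logical
 * block of 'inode' itself.  With 'new' set, the caller intends to
 * overwrite the whole metapage, so its data is zeroed (and, when the
 * metapage fills the whole page, the read from disk is skipped
 * entirely).  On success the metapage is returned with an extra
 * reference and META_locked held.
 *
 * Callers normally go through the wrappers in jfs_metapage.h; a rough
 * usage sketch (illustrative only, not a literal caller):
 *
 *	mp = read_metapage(ip, blkno, PSIZE, 0);
 *	if (mp) {
 *		... inspect or modify mp->data ...
 *		mark_metapage_dirty(mp);
 *		release_metapage(mp);
 *	}
 */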
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		mp->page = page;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

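/*
 * Reference-count helpers: grab_metapage() takes an extra page and
 * metapage reference under the page lock; hold_metapage()/put_metapage()
 * bracket short accesses that only need the page lock, with
 * put_metapage() falling through to release_metapage() when it turns
 * out to be the last user.  force_metapage() pushes a metapage to disk
 * immediately, temporarily setting META_forcewrite so that
 * metapage_writepage() ignores the nohomeok pin.
 */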
void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	get_page(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	write_one_page(page, 1);
	clear_bit(META_forcewrite, &mp->flag);
	put_page(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	get_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

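/*
 * Drop a reference on a metapage.  When the last reference goes away
 * and the metapage is not pinned, a dirty metapage is pushed toward
 * disk (synchronously if META_sync is set), a clean one is removed
 * from the logsync list, and drop_metapage() frees the structure if
 * nothing else keeps it alive.
 */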
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		put_page(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			write_one_page(page, 1);
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	put_page(page);
}

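/*
 * Mark every metapage that maps a block in [addr, addr + len) as
 * discarded: the blocks have been freed, so these metapages must not be
 * written back, and any logsync linkage is dropped.  The pages
 * themselves are released later through the normal release path.
 */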
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		put_page(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		   "JFS Metapage statistics\n"
		   "=======================\n"
		   "page allocations = %d\n"
		   "page frees = %d\n"
		   "lock waits = %d\n",
		   mpStat.pagealloc,
		   mpStat.pagefree,
		   mpStat.lockwait);
	return 0;
}

static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, jfs_mpstat_proc_show, NULL);
}

const struct file_operations jfs_mpstat_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= jfs_mpstat_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif