1 /*
2 * linux/mm/filemap.c
3 *
4 * Copyright (C) 1994-1999 Linus Torvalds
5 */
6
7 /*
8 * This file handles the generic file mmap semantics used by
9 * most "normal" filesystems (but you don't /have/ to use this:
10 * the NFS filesystem used to do this differently, for example)
11 */
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/compiler.h>
15 #include <linux/fs.h>
16 #include <linux/uaccess.h>
17 #include <linux/aio.h>
18 #include <linux/capability.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/mman.h>
23 #include <linux/pagemap.h>
24 #include <linux/file.h>
25 #include <linux/uio.h>
26 #include <linux/hash.h>
27 #include <linux/writeback.h>
28 #include <linux/backing-dev.h>
29 #include <linux/pagevec.h>
30 #include <linux/blkdev.h>
32 #include <linux/security.h>
33 #include <linux/syscalls.h>
34 #include <linux/cpuset.h>
35 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
36 #include "internal.h"
37
38 /*
39 * FIXME: remove all knowledge of the buffer layer from the core VM
40 */
41 #include <linux/buffer_head.h> /* for generic_osync_inode */
42
43 #include <asm/mman.h>
44
45 static ssize_t
46 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
47 loff_t offset, unsigned long nr_segs);
48
49 /*
50 * Shared mappings implemented 30.11.1994. It's not fully working yet,
51 * though.
52 *
53 * Shared mappings now work. 15.8.1995 Bruno.
54 *
55 * finished 'unifying' the page and buffer cache and SMP-threaded the
56 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
57 *
58 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
59 */
60
61 /*
62 * Lock ordering:
63 *
64 * ->i_mmap_lock (vmtruncate)
65 * ->private_lock (__free_pte->__set_page_dirty_buffers)
66 * ->swap_lock (exclusive_swap_page, others)
67 * ->mapping->tree_lock
68 * ->zone.lock
69 *
70 * ->i_mutex
71 * ->i_mmap_lock (truncate->unmap_mapping_range)
72 *
73 * ->mmap_sem
74 * ->i_mmap_lock
75 * ->page_table_lock or pte_lock (various, mainly in memory.c)
76 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
77 *
78 * ->mmap_sem
79 * ->lock_page (access_process_vm)
80 *
81 * ->i_mutex (generic_file_buffered_write)
82 * ->mmap_sem (fault_in_pages_readable->do_page_fault)
83 *
84 * ->i_mutex
85 * ->i_alloc_sem (various)
86 *
87 * ->inode_lock
88 * ->sb_lock (fs/fs-writeback.c)
89 * ->mapping->tree_lock (__sync_single_inode)
90 *
91 * ->i_mmap_lock
92 * ->anon_vma.lock (vma_adjust)
93 *
94 * ->anon_vma.lock
95 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
96 *
97 * ->page_table_lock or pte_lock
98 * ->swap_lock (try_to_unmap_one)
99 * ->private_lock (try_to_unmap_one)
100 * ->tree_lock (try_to_unmap_one)
101 * ->zone.lru_lock (follow_page->mark_page_accessed)
102 * ->zone.lru_lock (check_pte_range->isolate_lru_page)
103 * ->private_lock (page_remove_rmap->set_page_dirty)
104 * ->tree_lock (page_remove_rmap->set_page_dirty)
105 * ->inode_lock (page_remove_rmap->set_page_dirty)
106 * ->inode_lock (zap_pte_range->set_page_dirty)
107 * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
108 *
109 * ->task->proc_lock
110 * ->dcache_lock (proc_pid_lookup)
111 */
112
113 /*
114 * Remove a page from the page cache and free it. Caller has to make
115 * sure the page is locked and that nobody else uses it - or that usage
116 * is safe. The caller must hold a write_lock on the mapping's tree_lock.
117 */
118 void __remove_from_page_cache(struct page *page)
119 {
120 struct address_space *mapping = page->mapping;
121
122 radix_tree_delete(&mapping->page_tree, page->index);
123 page->mapping = NULL;
124 mapping->nrpages--;
125 __dec_zone_page_state(page, NR_FILE_PAGES);
126 BUG_ON(page_mapped(page));
127
128 /*
129 * Some filesystems seem to re-dirty the page even after
130 * the VM has canceled the dirty bit (eg ext3 journaling).
131 *
132 * Fix it up by doing a final dirty accounting check after
133 * having removed the page entirely.
134 */
135 if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
136 dec_zone_page_state(page, NR_FILE_DIRTY);
137 dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
138 }
139 }
140
141 void remove_from_page_cache(struct page *page)
142 {
143 struct address_space *mapping = page->mapping;
144
145 BUG_ON(!PageLocked(page));
146
147 write_lock_irq(&mapping->tree_lock);
148 __remove_from_page_cache(page);
149 write_unlock_irq(&mapping->tree_lock);
150 }
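
/*
 * Example usage (sketch; "mapping" and "page" are assumed caller
 * context): a truncate-style path removes a page roughly like this.
 * Note that remove_from_page_cache() does not drop the pagecache
 * reference, so the caller still has to release it:
 *
 *	lock_page(page);
 *	if (page->mapping == mapping)
 *		remove_from_page_cache(page);
 *	unlock_page(page);
 *	page_cache_release(page);
 */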
151
152 static int sync_page(void *word)
153 {
154 struct address_space *mapping;
155 struct page *page;
156
157 page = container_of((unsigned long *)word, struct page, flags);
158
159 /*
160 * page_mapping() is being called without PG_locked held.
161 * Some knowledge of the state and use of the page is used to
162 * reduce the requirements down to a memory barrier.
163 * The danger here is of a stale page_mapping() return value
164 * indicating a struct address_space different from the one it's
165 * associated with when it is associated with one.
166 * After smp_mb(), it's either the correct page_mapping() for
167 * the page, or an old page_mapping() and the page's own
168 * page_mapping() has gone NULL.
169 * The ->sync_page() address_space operation must tolerate
170 * page_mapping() going NULL. By an amazing coincidence,
171 * this comes about because none of the users of the page
172 * in the ->sync_page() methods make essential use of the
173 * page_mapping(), merely passing the page down to the backing
174 * device's unplug functions when it's non-NULL, which in turn
175 * ignore it for all cases but swap, where only page_private(page) is
176 * of interest. When page_mapping() does go NULL, the entire
177 * call stack gracefully ignores the page and returns.
178 * -- wli
179 */
180 smp_mb();
181 mapping = page_mapping(page);
182 if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
183 mapping->a_ops->sync_page(page);
184 io_schedule();
185 return 0;
186 }
187
188 static int sync_page_killable(void *word)
189 {
190 sync_page(word);
191 return fatal_signal_pending(current) ? -EINTR : 0;
192 }
193
194 /**
195 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
196 * @mapping: address space structure to write
197 * @start: offset in bytes where the range starts
198 * @end: offset in bytes where the range ends (inclusive)
199 * @sync_mode: enable synchronous operation
200 *
201 * Start writeback against all of a mapping's dirty pages that lie
202 * within the byte offsets <start, end> inclusive.
203 *
204 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
205 * opposed to a regular memory cleansing writeback. The difference between
206 * these two operations is that if a dirty page/buffer is encountered, it must
207 * be waited upon, and not just skipped over.
208 */
209 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
210 loff_t end, int sync_mode)
211 {
212 int ret;
213 struct writeback_control wbc = {
214 .sync_mode = sync_mode,
215 .nr_to_write = mapping->nrpages * 2,
216 .range_start = start,
217 .range_end = end,
218 };
219
220 if (!mapping_cap_writeback_dirty(mapping))
221 return 0;
222
223 ret = do_writepages(mapping, &wbc);
224 return ret;
225 }
226
227 static inline int __filemap_fdatawrite(struct address_space *mapping,
228 int sync_mode)
229 {
230 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
231 }
232
233 int filemap_fdatawrite(struct address_space *mapping)
234 {
235 return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
236 }
237 EXPORT_SYMBOL(filemap_fdatawrite);
238
239 static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
240 loff_t end)
241 {
242 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
243 }
244
245 /**
246 * filemap_flush - mostly a non-blocking flush
247 * @mapping: target address_space
248 *
249 * This is a mostly non-blocking flush. Not suitable for data-integrity
250 * purposes - I/O may not be started against all dirty pages.
251 */
252 int filemap_flush(struct address_space *mapping)
253 {
254 return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
255 }
256 EXPORT_SYMBOL(filemap_flush);
257
258 /**
259 * wait_on_page_writeback_range - wait for writeback to complete
260 * @mapping: target address_space
261 * @start: beginning page index
262 * @end: ending page index
263 *
264 * Wait for writeback to complete against pages indexed by start->end
265 * inclusive.
266 */
267 int wait_on_page_writeback_range(struct address_space *mapping,
268 pgoff_t start, pgoff_t end)
269 {
270 struct pagevec pvec;
271 int nr_pages;
272 int ret = 0;
273 pgoff_t index;
274
275 if (end < start)
276 return 0;
277
278 pagevec_init(&pvec, 0);
279 index = start;
280 while ((index <= end) &&
281 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
282 PAGECACHE_TAG_WRITEBACK,
283 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
284 unsigned i;
285
286 for (i = 0; i < nr_pages; i++) {
287 struct page *page = pvec.pages[i];
288
289 /* until radix tree lookup accepts end_index */
290 if (page->index > end)
291 continue;
292
293 wait_on_page_writeback(page);
294 if (PageError(page))
295 ret = -EIO;
296 }
297 pagevec_release(&pvec);
298 cond_resched();
299 }
300
301 /* Check for outstanding write errors */
302 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
303 ret = -ENOSPC;
304 if (test_and_clear_bit(AS_EIO, &mapping->flags))
305 ret = -EIO;
306
307 return ret;
308 }
309
310 /**
311 * sync_page_range - write and wait on all pages in the passed range
312 * @inode: target inode
313 * @mapping: target address_space
314 * @pos: beginning offset in bytes to write
315 * @count: number of bytes to write
316 *
317 * Write and wait upon all the pages in the passed range. This is a "data
318 * integrity" operation. It waits upon in-flight writeout before starting and
319 * waiting upon new writeout. If there was an IO error, return it.
320 *
321 * We need to re-take i_mutex during the generic_osync_inode list walk because
322 * it is otherwise livelockable.
323 */
324 int sync_page_range(struct inode *inode, struct address_space *mapping,
325 loff_t pos, loff_t count)
326 {
327 pgoff_t start = pos >> PAGE_CACHE_SHIFT;
328 pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
329 int ret;
330
331 if (!mapping_cap_writeback_dirty(mapping) || !count)
332 return 0;
333 ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
334 if (ret == 0) {
335 mutex_lock(&inode->i_mutex);
336 ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
337 mutex_unlock(&inode->i_mutex);
338 }
339 if (ret == 0)
340 ret = wait_on_page_writeback_range(mapping, start, end);
341 return ret;
342 }
343 EXPORT_SYMBOL(sync_page_range);
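
/*
 * Example usage (sketch; "file", "inode", "mapping", "pos" and "written"
 * are assumed caller context): this mirrors how an O_SYNC buffered write
 * path may flush and wait on what it just wrote:
 *
 *	if (written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 *		ssize_t err;
 *
 *		err = sync_page_range(inode, mapping, pos, written);
 *		if (err < 0)
 *			written = err;
 *	}
 */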
344
345 /**
346 * sync_page_range_nolock - write and wait on all pages in the passed range without taking i_mutex
347 * @inode: target inode
348 * @mapping: target address_space
349 * @pos: beginning offset in bytes to write
350 * @count: number of bytes to write
351 *
352 * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
353 * as it forces O_SYNC writers to different parts of the same file
354 * to be serialised right until io completion.
355 */
356 int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
357 loff_t pos, loff_t count)
358 {
359 pgoff_t start = pos >> PAGE_CACHE_SHIFT;
360 pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
361 int ret;
362
363 if (!mapping_cap_writeback_dirty(mapping) || !count)
364 return 0;
365 ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
366 if (ret == 0)
367 ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
368 if (ret == 0)
369 ret = wait_on_page_writeback_range(mapping, start, end);
370 return ret;
371 }
372 EXPORT_SYMBOL(sync_page_range_nolock);
373
374 /**
375 * filemap_fdatawait - wait for all under-writeback pages to complete
376 * @mapping: address space structure to wait for
377 *
378 * Walk the list of under-writeback pages of the given address space
379 * and wait for all of them.
380 */
381 int filemap_fdatawait(struct address_space *mapping)
382 {
383 loff_t i_size = i_size_read(mapping->host);
384
385 if (i_size == 0)
386 return 0;
387
388 return wait_on_page_writeback_range(mapping, 0,
389 (i_size - 1) >> PAGE_CACHE_SHIFT);
390 }
391 EXPORT_SYMBOL(filemap_fdatawait);
392
393 int filemap_write_and_wait(struct address_space *mapping)
394 {
395 int err = 0;
396
397 if (mapping->nrpages) {
398 err = filemap_fdatawrite(mapping);
399 /*
400 * Even if the above returned error, the pages may be
401 * written partially (e.g. -ENOSPC), so we wait for it.
402 * But -EIO is a special case: it may indicate that the worst
403 * thing (e.g. a bug) happened, so we avoid waiting for it.
404 */
405 if (err != -EIO) {
406 int err2 = filemap_fdatawait(mapping);
407 if (!err)
408 err = err2;
409 }
410 }
411 return err;
412 }
413 EXPORT_SYMBOL(filemap_write_and_wait);
414
415 /**
416 * filemap_write_and_wait_range - write out & wait on a file range
417 * @mapping: the address_space for the pages
418 * @lstart: offset in bytes where the range starts
419 * @lend: offset in bytes where the range ends (inclusive)
420 *
421 * Write out and wait upon file offsets lstart->lend, inclusive.
422 *
423 * Note that `lend' is inclusive (describes the last byte to be written) so
424 * that this function can be used to write to the very end-of-file (lend = -1).
425 */
426 int filemap_write_and_wait_range(struct address_space *mapping,
427 loff_t lstart, loff_t lend)
428 {
429 int err = 0;
430
431 if (mapping->nrpages) {
432 err = __filemap_fdatawrite_range(mapping, lstart, lend,
433 WB_SYNC_ALL);
434 /* See comment of filemap_write_and_wait() */
435 if (err != -EIO) {
436 int err2 = wait_on_page_writeback_range(mapping,
437 lstart >> PAGE_CACHE_SHIFT,
438 lend >> PAGE_CACHE_SHIFT);
439 if (!err)
440 err = err2;
441 }
442 }
443 return err;
444 }
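
/*
 * Example usage (sketch): an fsync-style caller that only cares about
 * the populated part of the file could write and wait on it with:
 *
 *	loff_t isize = i_size_read(mapping->host);
 *
 *	if (isize)
 *		err = filemap_write_and_wait_range(mapping, 0, isize - 1);
 */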
445
446 /**
447 * add_to_page_cache - add newly allocated pagecache pages
448 * @page: page to add
449 * @mapping: the page's address_space
450 * @offset: page index
451 * @gfp_mask: page allocation mode
452 *
453 * This function is used to add newly allocated pagecache pages;
454 * the page is new, so we can just run SetPageLocked() against it.
455 * The other page state flags were set by rmqueue().
456 *
457 * This function does not add the page to the LRU. The caller must do that.
458 */
459 int add_to_page_cache(struct page *page, struct address_space *mapping,
460 pgoff_t offset, gfp_t gfp_mask)
461 {
462 int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
463
464 if (error == 0) {
465 write_lock_irq(&mapping->tree_lock);
466 error = radix_tree_insert(&mapping->page_tree, offset, page);
467 if (!error) {
468 page_cache_get(page);
469 SetPageLocked(page);
470 page->mapping = mapping;
471 page->index = offset;
472 mapping->nrpages++;
473 __inc_zone_page_state(page, NR_FILE_PAGES);
474 }
475 write_unlock_irq(&mapping->tree_lock);
476 radix_tree_preload_end();
477 }
478 return error;
479 }
480 EXPORT_SYMBOL(add_to_page_cache);
481
482 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
483 pgoff_t offset, gfp_t gfp_mask)
484 {
485 int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
486 if (ret == 0)
487 lru_cache_add(page);
488 return ret;
489 }
490
491 #ifdef CONFIG_NUMA
492 struct page *__page_cache_alloc(gfp_t gfp)
493 {
494 if (cpuset_do_page_mem_spread()) {
495 int n = cpuset_mem_spread_node();
496 return alloc_pages_node(n, gfp, 0);
497 }
498 return alloc_pages(gfp, 0);
499 }
500 EXPORT_SYMBOL(__page_cache_alloc);
501 #endif
502
503 static int __sleep_on_page_lock(void *word)
504 {
505 io_schedule();
506 return 0;
507 }
508
509 /*
510 * In order to wait for pages to become available there must be
511 * waitqueues associated with pages. By using a hash table of
512 * waitqueues where the bucket discipline is to maintain all
513 * waiters on the same queue and wake all when any of the pages
514 * become available - with the woken contexts checking whether the
515 * appropriate page actually became available - this saves space
516 * at the cost of "thundering herd" phenomena during rare hash
517 * collisions.
518 */
519 static wait_queue_head_t *page_waitqueue(struct page *page)
520 {
521 const struct zone *zone = page_zone(page);
522
523 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
524 }
525
526 static inline void wake_up_page(struct page *page, int bit)
527 {
528 __wake_up_bit(page_waitqueue(page), &page->flags, bit);
529 }
530
531 void fastcall wait_on_page_bit(struct page *page, int bit_nr)
532 {
533 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
534
535 if (test_bit(bit_nr, &page->flags))
536 __wait_on_bit(page_waitqueue(page), &wait, sync_page,
537 TASK_UNINTERRUPTIBLE);
538 }
539 EXPORT_SYMBOL(wait_on_page_bit);
540
541 /**
542 * unlock_page - unlock a locked page
543 * @page: the page
544 *
545 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
546 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
547 * mechanism between PageLocked pages and PageWriteback pages is shared.
548 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
549 *
550 * The first mb is necessary to safely close the critical section opened by the
551 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
552 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
553 * parallel wait_on_page_locked()).
554 */
555 void fastcall unlock_page(struct page *page)
556 {
557 smp_mb__before_clear_bit();
558 if (!TestClearPageLocked(page))
559 BUG();
560 smp_mb__after_clear_bit();
561 wake_up_page(page, PG_locked);
562 }
563 EXPORT_SYMBOL(unlock_page);
564
565 /**
566 * end_page_writeback - end writeback against a page
567 * @page: the page
568 */
569 void end_page_writeback(struct page *page)
570 {
571 if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
572 if (!test_clear_page_writeback(page))
573 BUG();
574 }
575 smp_mb__after_clear_bit();
576 wake_up_page(page, PG_writeback);
577 }
578 EXPORT_SYMBOL(end_page_writeback);
579
580 /**
581 * __lock_page - get a lock on the page, assuming we need to sleep to get it
582 * @page: the page to lock
583 *
584 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
585 * random driver's requestfn sets TASK_RUNNING, we could busywait. However
586 * chances are that on the second loop, the block layer's plug list is empty,
587 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
588 */
589 void fastcall __lock_page(struct page *page)
590 {
591 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
592
593 __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
594 TASK_UNINTERRUPTIBLE);
595 }
596 EXPORT_SYMBOL(__lock_page);
597
598 int fastcall __lock_page_killable(struct page *page)
599 {
600 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
601
602 return __wait_on_bit_lock(page_waitqueue(page), &wait,
603 sync_page_killable, TASK_KILLABLE);
604 }
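
/*
 * Example usage (sketch): unlike lock_page(), lock_page_killable() can
 * fail when a fatal signal arrives, so callers must check its return
 * value and back out, as the read path in this file does:
 *
 *	if (lock_page_killable(page)) {
 *		page_cache_release(page);
 *		return -EIO;	(or propagate the error to the caller)
 *	}
 */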
605
606 /*
607 * Variant of lock_page that does not require the caller to hold a reference
608 * on the page's mapping.
609 */
610 void fastcall __lock_page_nosync(struct page *page)
611 {
612 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
613 __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
614 TASK_UNINTERRUPTIBLE);
615 }
616
617 /**
618 * find_get_page - find and get a page reference
619 * @mapping: the address_space to search
620 * @offset: the page index
621 *
622 * Is there a pagecache struct page at the given (mapping, offset) tuple?
623 * If yes, increment its refcount and return it; if no, return NULL.
624 */
625 struct page * find_get_page(struct address_space *mapping, pgoff_t offset)
626 {
627 struct page *page;
628
629 read_lock_irq(&mapping->tree_lock);
630 page = radix_tree_lookup(&mapping->page_tree, offset);
631 if (page)
632 page_cache_get(page);
633 read_unlock_irq(&mapping->tree_lock);
634 return page;
635 }
636 EXPORT_SYMBOL(find_get_page);
637
638 /**
639 * find_lock_page - locate, pin and lock a pagecache page
640 * @mapping: the address_space to search
641 * @offset: the page index
642 *
643 * Locates the desired pagecache page, locks it, increments its reference
644 * count and returns its address.
645 *
646 * Returns NULL if the page was not present. find_lock_page() may sleep.
647 */
648 struct page *find_lock_page(struct address_space *mapping,
649 pgoff_t offset)
650 {
651 struct page *page;
652
653 repeat:
654 read_lock_irq(&mapping->tree_lock);
655 page = radix_tree_lookup(&mapping->page_tree, offset);
656 if (page) {
657 page_cache_get(page);
658 if (TestSetPageLocked(page)) {
659 read_unlock_irq(&mapping->tree_lock);
660 __lock_page(page);
661
662 /* Has the page been truncated while we slept? */
663 if (unlikely(page->mapping != mapping)) {
664 unlock_page(page);
665 page_cache_release(page);
666 goto repeat;
667 }
668 VM_BUG_ON(page->index != offset);
669 goto out;
670 }
671 }
672 read_unlock_irq(&mapping->tree_lock);
673 out:
674 return page;
675 }
676 EXPORT_SYMBOL(find_lock_page);
677
678 /**
679 * find_or_create_page - locate or add a pagecache page
680 * @mapping: the page's address_space
681 * @index: the page's index into the mapping
682 * @gfp_mask: page allocation mode
683 *
684 * Locates a page in the pagecache. If the page is not present, a new page
685 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
686 * LRU list. The returned page is locked and has its reference count
687 * incremented.
688 *
689 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
690 * allocation!
691 *
692 * find_or_create_page() returns the desired page's address, or NULL on
693 * memory exhaustion.
694 */
695 struct page *find_or_create_page(struct address_space *mapping,
696 pgoff_t index, gfp_t gfp_mask)
697 {
698 struct page *page;
699 int err;
700 repeat:
701 page = find_lock_page(mapping, index);
702 if (!page) {
703 page = __page_cache_alloc(gfp_mask);
704 if (!page)
705 return NULL;
706 err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
707 if (unlikely(err)) {
708 page_cache_release(page);
709 page = NULL;
710 if (err == -EEXIST)
711 goto repeat;
712 }
713 }
714 return page;
715 }
716 EXPORT_SYMBOL(find_or_create_page);
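
/*
 * Example usage (sketch; "mapping" and "index" are assumed caller
 * context): a typical caller grabs a locked page, fills it, then drops
 * both the lock and the reference:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... initialize or overwrite the page contents ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */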
717
718 /**
719 * find_get_pages - gang pagecache lookup
720 * @mapping: The address_space to search
721 * @start: The starting page index
722 * @nr_pages: The maximum number of pages
723 * @pages: Where the resulting pages are placed
724 *
725 * find_get_pages() will search for and return a group of up to
726 * @nr_pages pages in the mapping. The pages are placed at @pages.
727 * find_get_pages() takes a reference against the returned pages.
728 *
729 * The search returns a group of mapping-contiguous pages with ascending
730 * indexes. There may be holes in the indices due to not-present pages.
731 *
732 * find_get_pages() returns the number of pages which were found.
733 */
734 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
735 unsigned int nr_pages, struct page **pages)
736 {
737 unsigned int i;
738 unsigned int ret;
739
740 read_lock_irq(&mapping->tree_lock);
741 ret = radix_tree_gang_lookup(&mapping->page_tree,
742 (void **)pages, start, nr_pages);
743 for (i = 0; i < ret; i++)
744 page_cache_get(pages[i]);
745 read_unlock_irq(&mapping->tree_lock);
746 return ret;
747 }
748
749 /**
750 * find_get_pages_contig - gang contiguous pagecache lookup
751 * @mapping: The address_space to search
752 * @index: The starting page index
753 * @nr_pages: The maximum number of pages
754 * @pages: Where the resulting pages are placed
755 *
756 * find_get_pages_contig() works exactly like find_get_pages(), except
757 * that the returned pages are guaranteed to be index-contiguous.
758 *
759 * find_get_pages_contig() returns the number of pages which were found.
760 */
761 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
762 unsigned int nr_pages, struct page **pages)
763 {
764 unsigned int i;
765 unsigned int ret;
766
767 read_lock_irq(&mapping->tree_lock);
768 ret = radix_tree_gang_lookup(&mapping->page_tree,
769 (void **)pages, index, nr_pages);
770 for (i = 0; i < ret; i++) {
771 if (pages[i]->mapping == NULL || pages[i]->index != index)
772 break;
773
774 page_cache_get(pages[i]);
775 index++;
776 }
777 read_unlock_irq(&mapping->tree_lock);
778 return i;
779 }
780 EXPORT_SYMBOL(find_get_pages_contig);
781
782 /**
783 * find_get_pages_tag - find and return pages that match @tag
784 * @mapping: the address_space to search
785 * @index: the starting page index
786 * @tag: the tag index
787 * @nr_pages: the maximum number of pages
788 * @pages: where the resulting pages are placed
789 *
790 * Like find_get_pages, except we only return pages which are tagged with
791 * @tag. We update @index to index the next page for the traversal.
792 */
793 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
794 int tag, unsigned int nr_pages, struct page **pages)
795 {
796 unsigned int i;
797 unsigned int ret;
798
799 read_lock_irq(&mapping->tree_lock);
800 ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
801 (void **)pages, *index, nr_pages, tag);
802 for (i = 0; i < ret; i++)
803 page_cache_get(pages[i]);
804 if (ret)
805 *index = pages[ret - 1]->index + 1;
806 read_unlock_irq(&mapping->tree_lock);
807 return ret;
808 }
809 EXPORT_SYMBOL(find_get_pages_tag);
810
811 /**
812 * grab_cache_page_nowait - returns locked page at given index in given cache
813 * @mapping: target address_space
814 * @index: the page index
815 *
816 * Same as grab_cache_page(), but do not wait if the page is unavailable.
817 * This is intended for speculative data generators, where the data can
818 * be regenerated if the page couldn't be grabbed. This routine should
819 * be safe to call while holding the lock for another page.
820 *
821 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
822 * and deadlock against the caller's locked page.
823 */
824 struct page *
825 grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
826 {
827 struct page *page = find_get_page(mapping, index);
828
829 if (page) {
830 if (!TestSetPageLocked(page))
831 return page;
832 page_cache_release(page);
833 return NULL;
834 }
835 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
836 if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
837 page_cache_release(page);
838 page = NULL;
839 }
840 return page;
841 }
842 EXPORT_SYMBOL(grab_cache_page_nowait);
843
844 /*
845 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
846 * a _large_ part of the i/o request. Imagine the worst scenario:
847 *
848 * ---R__________________________________________B__________
849 * ^ reading here ^ bad block(assume 4k)
850 *
851 * read(R) => miss => readahead(R...B) => media error => frustrating retries
852 * => failing the whole request => read(R) => read(R+1) =>
853 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
854 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
855 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
856 *
857 * It is going insane. Fix it by quickly scaling down the readahead size.
858 */
859 static void shrink_readahead_size_eio(struct file *filp,
860 struct file_ra_state *ra)
861 {
862 if (!ra->ra_pages)
863 return;
864
865 ra->ra_pages /= 4;
866 }
867
868 /**
869 * do_generic_mapping_read - generic file read routine
870 * @mapping: address_space to be read
871 * @ra: file's readahead state
872 * @filp: the file to read
873 * @ppos: current file position
874 * @desc: read_descriptor
875 * @actor: read method
876 *
877 * This is a generic file read routine, and uses the
878 * mapping->a_ops->readpage() function for the actual low-level stuff.
879 *
880 * This is really ugly. But the goto's actually try to clarify some
881 * of the logic when it comes to error handling etc.
882 *
883 * Note the struct file* is only passed for the use of readpage.
884 * It may be NULL.
885 */
886 void do_generic_mapping_read(struct address_space *mapping,
887 struct file_ra_state *ra,
888 struct file *filp,
889 loff_t *ppos,
890 read_descriptor_t *desc,
891 read_actor_t actor)
892 {
893 struct inode *inode = mapping->host;
894 pgoff_t index;
895 pgoff_t last_index;
896 pgoff_t prev_index;
897 unsigned long offset; /* offset into pagecache page */
898 unsigned int prev_offset;
899 int error;
900
901 index = *ppos >> PAGE_CACHE_SHIFT;
902 prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
903 prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
904 last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
905 offset = *ppos & ~PAGE_CACHE_MASK;
906
907 for (;;) {
908 struct page *page;
909 pgoff_t end_index;
910 loff_t isize;
911 unsigned long nr, ret;
912
913 cond_resched();
914 find_page:
915 page = find_get_page(mapping, index);
916 if (!page) {
917 page_cache_sync_readahead(mapping,
918 ra, filp,
919 index, last_index - index);
920 page = find_get_page(mapping, index);
921 if (unlikely(page == NULL))
922 goto no_cached_page;
923 }
924 if (PageReadahead(page)) {
925 page_cache_async_readahead(mapping,
926 ra, filp, page,
927 index, last_index - index);
928 }
929 if (!PageUptodate(page))
930 goto page_not_up_to_date;
931 page_ok:
932 /*
933 * i_size must be checked after we know the page is Uptodate.
934 *
935 * Checking i_size after the PageUptodate check allows us to calculate
936 * the correct value for "nr", which means the zero-filled
937 * part of the page is not copied back to userspace (unless
938 * another truncate extends the file - this is desired though).
939 */
940
941 isize = i_size_read(inode);
942 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
943 if (unlikely(!isize || index > end_index)) {
944 page_cache_release(page);
945 goto out;
946 }
947
948 /* nr is the maximum number of bytes to copy from this page */
949 nr = PAGE_CACHE_SIZE;
950 if (index == end_index) {
951 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
952 if (nr <= offset) {
953 page_cache_release(page);
954 goto out;
955 }
956 }
957 nr = nr - offset;
958
959 /* If users can be writing to this page using arbitrary
960 * virtual addresses, take care about potential aliasing
961 * before reading the page on the kernel side.
962 */
963 if (mapping_writably_mapped(mapping))
964 flush_dcache_page(page);
965
966 /*
967 * When a sequential read accesses a page several times,
968 * only mark it as accessed the first time.
969 */
970 if (prev_index != index || offset != prev_offset)
971 mark_page_accessed(page);
972 prev_index = index;
973
974 /*
975 * Ok, we have the page, and it's up-to-date, so
976 * now we can copy it to user space...
977 *
978 * The actor routine returns how many bytes were actually used..
979 * NOTE! This may not be the same as how much of a user buffer
980 * we filled up (we may be padding etc), so we can only update
981 * "pos" here (the actor routine has to update the user buffer
982 * pointers and the remaining count).
983 */
984 ret = actor(desc, page, offset, nr);
985 offset += ret;
986 index += offset >> PAGE_CACHE_SHIFT;
987 offset &= ~PAGE_CACHE_MASK;
988 prev_offset = offset;
989
990 page_cache_release(page);
991 if (ret == nr && desc->count)
992 continue;
993 goto out;
994
995 page_not_up_to_date:
996 /* Get exclusive access to the page ... */
997 if (lock_page_killable(page))
998 goto readpage_eio;
999
1000 /* Did it get truncated before we got the lock? */
1001 if (!page->mapping) {
1002 unlock_page(page);
1003 page_cache_release(page);
1004 continue;
1005 }
1006
1007 /* Did somebody else fill it already? */
1008 if (PageUptodate(page)) {
1009 unlock_page(page);
1010 goto page_ok;
1011 }
1012
1013 readpage:
1014 /* Start the actual read. The read will unlock the page. */
1015 error = mapping->a_ops->readpage(filp, page);
1016
1017 if (unlikely(error)) {
1018 if (error == AOP_TRUNCATED_PAGE) {
1019 page_cache_release(page);
1020 goto find_page;
1021 }
1022 goto readpage_error;
1023 }
1024
1025 if (!PageUptodate(page)) {
1026 if (lock_page_killable(page))
1027 goto readpage_eio;
1028 if (!PageUptodate(page)) {
1029 if (page->mapping == NULL) {
1030 /*
1031 * invalidate_inode_pages got it
1032 */
1033 unlock_page(page);
1034 page_cache_release(page);
1035 goto find_page;
1036 }
1037 unlock_page(page);
1038 shrink_readahead_size_eio(filp, ra);
1039 goto readpage_eio;
1040 }
1041 unlock_page(page);
1042 }
1043
1044 goto page_ok;
1045
1046 readpage_eio:
1047 error = -EIO;
1048 readpage_error:
1049 /* UHHUH! A synchronous read error occurred. Report it */
1050 desc->error = error;
1051 page_cache_release(page);
1052 goto out;
1053
1054 no_cached_page:
1055 /*
1056 * Ok, it wasn't cached, so we need to create a new
1057 * page..
1058 */
1059 page = page_cache_alloc_cold(mapping);
1060 if (!page) {
1061 desc->error = -ENOMEM;
1062 goto out;
1063 }
1064 error = add_to_page_cache_lru(page, mapping,
1065 index, GFP_KERNEL);
1066 if (error) {
1067 page_cache_release(page);
1068 if (error == -EEXIST)
1069 goto find_page;
1070 desc->error = error;
1071 goto out;
1072 }
1073 goto readpage;
1074 }
1075
1076 out:
1077 ra->prev_pos = prev_index;
1078 ra->prev_pos <<= PAGE_CACHE_SHIFT;
1079 ra->prev_pos |= prev_offset;
1080
1081 *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1082 if (filp)
1083 file_accessed(filp);
1084 }
1085 EXPORT_SYMBOL(do_generic_mapping_read);
1086
1087 int file_read_actor(read_descriptor_t *desc, struct page *page,
1088 unsigned long offset, unsigned long size)
1089 {
1090 char *kaddr;
1091 unsigned long left, count = desc->count;
1092
1093 if (size > count)
1094 size = count;
1095
1096 /*
1097 * Faults on the destination of a read are common, so do it before
1098 * taking the kmap.
1099 */
1100 if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1101 kaddr = kmap_atomic(page, KM_USER0);
1102 left = __copy_to_user_inatomic(desc->arg.buf,
1103 kaddr + offset, size);
1104 kunmap_atomic(kaddr, KM_USER0);
1105 if (left == 0)
1106 goto success;
1107 }
1108
1109 /* Do it the slow way */
1110 kaddr = kmap(page);
1111 left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1112 kunmap(page);
1113
1114 if (left) {
1115 size -= left;
1116 desc->error = -EFAULT;
1117 }
1118 success:
1119 desc->count = count - size;
1120 desc->written += size;
1121 desc->arg.buf += size;
1122 return size;
1123 }
1124
1125 /**
1126 * generic_segment_checks - perform checks before doing a read or write
1127 * @iov: io vector request
1128 * @nr_segs: number of segments in the iovec
1129 * @count: number of bytes to transfer
1130 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
1131 *
1132 * Adjusts the number of segments and the number of bytes to transfer
1133 * (*nr_segs must be properly initialized first). Returns an error code
1134 * that the caller should return, or zero if the transfer is allowed.
1135 */
1136 int generic_segment_checks(const struct iovec *iov,
1137 unsigned long *nr_segs, size_t *count, int access_flags)
1138 {
1139 unsigned long seg;
1140 size_t cnt = 0;
1141 for (seg = 0; seg < *nr_segs; seg++) {
1142 const struct iovec *iv = &iov[seg];
1143
1144 /*
1145 * If any segment has a negative length, or the cumulative
1146 * length ever wraps negative then return -EINVAL.
1147 */
1148 cnt += iv->iov_len;
1149 if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1150 return -EINVAL;
1151 if (access_ok(access_flags, iv->iov_base, iv->iov_len))
1152 continue;
1153 if (seg == 0)
1154 return -EFAULT;
1155 *nr_segs = seg;
1156 cnt -= iv->iov_len; /* This segment is no good */
1157 break;
1158 }
1159 *count = cnt;
1160 return 0;
1161 }
1162 EXPORT_SYMBOL(generic_segment_checks);
1163
1164 /**
1165 * generic_file_aio_read - generic filesystem read routine
1166 * @iocb: kernel I/O control block
1167 * @iov: io vector request
1168 * @nr_segs: number of segments in the iovec
1169 * @pos: current file position
1170 *
1171 * This is the "read()" routine for all filesystems
1172 * that can use the page cache directly.
1173 */
1174 ssize_t
1175 generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1176 unsigned long nr_segs, loff_t pos)
1177 {
1178 struct file *filp = iocb->ki_filp;
1179 ssize_t retval;
1180 unsigned long seg;
1181 size_t count;
1182 loff_t *ppos = &iocb->ki_pos;
1183
1184 count = 0;
1185 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1186 if (retval)
1187 return retval;
1188
1189 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1190 if (filp->f_flags & O_DIRECT) {
1191 loff_t size;
1192 struct address_space *mapping;
1193 struct inode *inode;
1194
1195 mapping = filp->f_mapping;
1196 inode = mapping->host;
1197 retval = 0;
1198 if (!count)
1199 goto out; /* skip atime */
1200 size = i_size_read(inode);
1201 if (pos < size) {
1202 retval = generic_file_direct_IO(READ, iocb,
1203 iov, pos, nr_segs);
1204 if (retval > 0)
1205 *ppos = pos + retval;
1206 }
1207 if (likely(retval != 0)) {
1208 file_accessed(filp);
1209 goto out;
1210 }
1211 }
1212
1213 retval = 0;
1214 if (count) {
1215 for (seg = 0; seg < nr_segs; seg++) {
1216 read_descriptor_t desc;
1217
1218 desc.written = 0;
1219 desc.arg.buf = iov[seg].iov_base;
1220 desc.count = iov[seg].iov_len;
1221 if (desc.count == 0)
1222 continue;
1223 desc.error = 0;
1224 			do_generic_file_read(filp, ppos, &desc, file_read_actor);
1225 retval += desc.written;
1226 if (desc.error) {
1227 retval = retval ?: desc.error;
1228 break;
1229 }
1230 if (desc.count > 0)
1231 break;
1232 }
1233 }
1234 out:
1235 return retval;
1236 }
1237 EXPORT_SYMBOL(generic_file_aio_read);
1238
1239 static ssize_t
1240 do_readahead(struct address_space *mapping, struct file *filp,
1241 pgoff_t index, unsigned long nr)
1242 {
1243 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1244 return -EINVAL;
1245
1246 force_page_cache_readahead(mapping, filp, index,
1247 max_sane_readahead(nr));
1248 return 0;
1249 }
1250
1251 asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
1252 {
1253 ssize_t ret;
1254 struct file *file;
1255
1256 ret = -EBADF;
1257 file = fget(fd);
1258 if (file) {
1259 if (file->f_mode & FMODE_READ) {
1260 struct address_space *mapping = file->f_mapping;
1261 pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1262 pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1263 unsigned long len = end - start + 1;
1264 ret = do_readahead(mapping, file, start, len);
1265 }
1266 fput(file);
1267 }
1268 return ret;
1269 }
1270
1271 #ifdef CONFIG_MMU
1272 /**
1273 * page_cache_read - adds requested page to the page cache if not already there
1274 * @file: file to read
1275 * @offset: page index
1276 *
1277 * This adds the requested page to the page cache if it isn't already there,
1278 * and schedules an I/O to read in its contents from disk.
1279 */
1280 static int fastcall page_cache_read(struct file * file, pgoff_t offset)
1281 {
1282 struct address_space *mapping = file->f_mapping;
1283 struct page *page;
1284 int ret;
1285
1286 do {
1287 page = page_cache_alloc_cold(mapping);
1288 if (!page)
1289 return -ENOMEM;
1290
1291 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1292 if (ret == 0)
1293 ret = mapping->a_ops->readpage(file, page);
1294 else if (ret == -EEXIST)
1295 ret = 0; /* losing race to add is OK */
1296
1297 page_cache_release(page);
1298
1299 } while (ret == AOP_TRUNCATED_PAGE);
1300
1301 return ret;
1302 }
1303
1304 #define MMAP_LOTSAMISS (100)
1305
1306 /**
1307 * filemap_fault - read in file data for page fault handling
1308 * @vma: vma in which the fault was taken
1309 * @vmf: struct vm_fault containing details of the fault
1310 *
1311 * filemap_fault() is invoked via the vma operations vector for a
1312 * mapped memory region to read in file data during a page fault.
1313 *
1314 * The goto's are kind of ugly, but this streamlines the normal case of having
1315 * it in the page cache, and handles the special cases reasonably without
1316 * having a lot of duplicated code.
1317 */
1318 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1319 {
1320 int error;
1321 struct file *file = vma->vm_file;
1322 struct address_space *mapping = file->f_mapping;
1323 struct file_ra_state *ra = &file->f_ra;
1324 struct inode *inode = mapping->host;
1325 struct page *page;
1326 unsigned long size;
1327 int did_readaround = 0;
1328 int ret = 0;
1329
1330 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1331 if (vmf->pgoff >= size)
1332 return VM_FAULT_SIGBUS;
1333
1334 /* If we don't want any read-ahead, don't bother */
1335 if (VM_RandomReadHint(vma))
1336 goto no_cached_page;
1337
1338 /*
1339 * Do we have something in the page cache already?
1340 */
1341 retry_find:
1342 page = find_lock_page(mapping, vmf->pgoff);
1343 /*
1344 * For sequential accesses, we use the generic readahead logic.
1345 */
1346 if (VM_SequentialReadHint(vma)) {
1347 if (!page) {
1348 page_cache_sync_readahead(mapping, ra, file,
1349 vmf->pgoff, 1);
1350 page = find_lock_page(mapping, vmf->pgoff);
1351 if (!page)
1352 goto no_cached_page;
1353 }
1354 if (PageReadahead(page)) {
1355 page_cache_async_readahead(mapping, ra, file, page,
1356 vmf->pgoff, 1);
1357 }
1358 }
1359
1360 if (!page) {
1361 unsigned long ra_pages;
1362
1363 ra->mmap_miss++;
1364
1365 /*
1366 * Do we miss much more than hit in this file? If so,
1367 * stop bothering with read-ahead. It will only hurt.
1368 */
1369 if (ra->mmap_miss > MMAP_LOTSAMISS)
1370 goto no_cached_page;
1371
1372 /*
1373 * To keep the pgmajfault counter straight, we need to
1374 * check did_readaround, as this is an inner loop.
1375 */
1376 if (!did_readaround) {
1377 ret = VM_FAULT_MAJOR;
1378 count_vm_event(PGMAJFAULT);
1379 }
1380 did_readaround = 1;
1381 ra_pages = max_sane_readahead(file->f_ra.ra_pages);
1382 if (ra_pages) {
1383 pgoff_t start = 0;
1384
1385 if (vmf->pgoff > ra_pages / 2)
1386 start = vmf->pgoff - ra_pages / 2;
1387 do_page_cache_readahead(mapping, file, start, ra_pages);
1388 }
1389 page = find_lock_page(mapping, vmf->pgoff);
1390 if (!page)
1391 goto no_cached_page;
1392 }
1393
1394 if (!did_readaround)
1395 ra->mmap_miss--;
1396
1397 /*
1398 * We have a locked page in the page cache, now we need to check
1399 * that it's up-to-date. If not, it is going to be due to an error.
1400 */
1401 if (unlikely(!PageUptodate(page)))
1402 goto page_not_uptodate;
1403
1404 /* Must recheck i_size under page lock */
1405 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1406 if (unlikely(vmf->pgoff >= size)) {
1407 unlock_page(page);
1408 page_cache_release(page);
1409 return VM_FAULT_SIGBUS;
1410 }
1411
1412 /*
1413 * Found the page and have a reference on it.
1414 */
1415 mark_page_accessed(page);
1416 ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
1417 vmf->page = page;
1418 return ret | VM_FAULT_LOCKED;
1419
1420 no_cached_page:
1421 /*
1422 * We're only likely to ever get here if MADV_RANDOM is in
1423 * effect.
1424 */
1425 error = page_cache_read(file, vmf->pgoff);
1426
1427 /*
1428 * The page we want has now been added to the page cache.
1429 * In the unlikely event that someone removed it in the
1430 * meantime, we'll just come back here and read it again.
1431 */
1432 if (error >= 0)
1433 goto retry_find;
1434
1435 /*
1436 * An error return from page_cache_read can result if the
1437 * system is low on memory, or a problem occurs while trying
1438 * to schedule I/O.
1439 */
1440 if (error == -ENOMEM)
1441 return VM_FAULT_OOM;
1442 return VM_FAULT_SIGBUS;
1443
1444 page_not_uptodate:
1445 /* IO error path */
1446 if (!did_readaround) {
1447 ret = VM_FAULT_MAJOR;
1448 count_vm_event(PGMAJFAULT);
1449 }
1450
1451 /*
1452 * Umm, take care of errors if the page isn't up-to-date.
1453 * Try to re-read it _once_. We do this synchronously,
1454 * because there really aren't any performance issues here
1455 * and we need to check for errors.
1456 */
1457 ClearPageError(page);
1458 error = mapping->a_ops->readpage(file, page);
1459 page_cache_release(page);
1460
1461 if (!error || error == AOP_TRUNCATED_PAGE)
1462 goto retry_find;
1463
1464 /* Things didn't work out. Report a SIGBUS to the mm layer. */
1465 shrink_readahead_size_eio(file, ra);
1466 return VM_FAULT_SIGBUS;
1467 }
1468 EXPORT_SYMBOL(filemap_fault);
1469
1470 struct vm_operations_struct generic_file_vm_ops = {
1471 .fault = filemap_fault,
1472 };
1473
1474 /* This is used for a general mmap of a disk file */
1475
1476 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1477 {
1478 struct address_space *mapping = file->f_mapping;
1479
1480 if (!mapping->a_ops->readpage)
1481 return -ENOEXEC;
1482 file_accessed(file);
1483 vma->vm_ops = &generic_file_vm_ops;
1484 vma->vm_flags |= VM_CAN_NONLINEAR;
1485 return 0;
1486 }
1487
1488 /*
1489 * This is for filesystems which do not implement ->writepage.
1490 */
1491 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1492 {
1493 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1494 return -EINVAL;
1495 return generic_file_mmap(file, vma);
1496 }
1497 #else
1498 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1499 {
1500 return -ENOSYS;
1501 }
1502 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1503 {
1504 return -ENOSYS;
1505 }
1506 #endif /* CONFIG_MMU */
1507
1508 EXPORT_SYMBOL(generic_file_mmap);
1509 EXPORT_SYMBOL(generic_file_readonly_mmap);
1510
1511 static struct page *__read_cache_page(struct address_space *mapping,
1512 pgoff_t index,
1513 int (*filler)(void *,struct page*),
1514 void *data)
1515 {
1516 struct page *page;
1517 int err;
1518 repeat:
1519 page = find_get_page(mapping, index);
1520 if (!page) {
1521 page = page_cache_alloc_cold(mapping);
1522 if (!page)
1523 return ERR_PTR(-ENOMEM);
1524 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1525 if (unlikely(err)) {
1526 page_cache_release(page);
1527 if (err == -EEXIST)
1528 goto repeat;
1529 /* Presumably ENOMEM for radix tree node */
1530 return ERR_PTR(err);
1531 }
1532 err = filler(data, page);
1533 if (err < 0) {
1534 page_cache_release(page);
1535 page = ERR_PTR(err);
1536 }
1537 }
1538 return page;
1539 }
1540
1541 /*
1542 * Same as read_cache_page, but don't wait for page to become unlocked
1543 * after submitting it to the filler.
1544 */
1545 struct page *read_cache_page_async(struct address_space *mapping,
1546 pgoff_t index,
1547 int (*filler)(void *,struct page*),
1548 void *data)
1549 {
1550 struct page *page;
1551 int err;
1552
1553 retry:
1554 page = __read_cache_page(mapping, index, filler, data);
1555 if (IS_ERR(page))
1556 return page;
1557 if (PageUptodate(page))
1558 goto out;
1559
1560 lock_page(page);
1561 if (!page->mapping) {
1562 unlock_page(page);
1563 page_cache_release(page);
1564 goto retry;
1565 }
1566 if (PageUptodate(page)) {
1567 unlock_page(page);
1568 goto out;
1569 }
1570 err = filler(data, page);
1571 if (err < 0) {
1572 page_cache_release(page);
1573 return ERR_PTR(err);
1574 }
1575 out:
1576 mark_page_accessed(page);
1577 return page;
1578 }
1579 EXPORT_SYMBOL(read_cache_page_async);
1580
1581 /**
1582 * read_cache_page - read into page cache, fill it if needed
1583 * @mapping: the page's address_space
1584 * @index: the page index
1585 * @filler: function to perform the read
1586 * @data: first argument passed to the @filler function
1587 *
1588 * Read into the page cache. If a page already exists, and PageUptodate() is
1589 * not set, try to fill the page then wait for it to become unlocked.
1590 *
1591 * If the page does not get brought uptodate, return -EIO.
1592 */
1593 struct page *read_cache_page(struct address_space *mapping,
1594 pgoff_t index,
1595 int (*filler)(void *,struct page*),
1596 void *data)
1597 {
1598 struct page *page;
1599
1600 page = read_cache_page_async(mapping, index, filler, data);
1601 if (IS_ERR(page))
1602 goto out;
1603 wait_on_page_locked(page);
1604 if (!PageUptodate(page)) {
1605 page_cache_release(page);
1606 page = ERR_PTR(-EIO);
1607 }
1608 out:
1609 return page;
1610 }
1611 EXPORT_SYMBOL(read_cache_page);
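
/*
 * Example usage (sketch; "mapping", "n" and "file" are assumed caller
 * context): filesystems commonly read a page with their own ->readpage
 * as the filler, and must release the page when done:
 *
 *	page = read_cache_page(mapping, n,
 *			(filler_t *)mapping->a_ops->readpage, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	kaddr = kmap(page);
 *	... use the data ...
 *	kunmap(page);
 *	page_cache_release(page);
 */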
1612
1613 /*
1614 * The logic we want is
1615 *
1616 * if suid or (sgid and xgrp)
1617 * remove privs
1618 */
1619 int should_remove_suid(struct dentry *dentry)
1620 {
1621 mode_t mode = dentry->d_inode->i_mode;
1622 int kill = 0;
1623
1624 /* suid always must be killed */
1625 if (unlikely(mode & S_ISUID))
1626 kill = ATTR_KILL_SUID;
1627
1628 /*
1629 * sgid without any exec bits is just a mandatory locking mark; leave
1630 * it alone. If some exec bits are set, it's a real sgid; kill it.
1631 */
1632 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1633 kill |= ATTR_KILL_SGID;
1634
1635 if (unlikely(kill && !capable(CAP_FSETID)))
1636 return kill;
1637
1638 return 0;
1639 }
1640 EXPORT_SYMBOL(should_remove_suid);
1641
1642 int __remove_suid(struct dentry *dentry, int kill)
1643 {
1644 struct iattr newattrs;
1645
1646 newattrs.ia_valid = ATTR_FORCE | kill;
1647 return notify_change(dentry, &newattrs);
1648 }
1649
1650 int remove_suid(struct dentry *dentry)
1651 {
1652 int killsuid = should_remove_suid(dentry);
1653 int killpriv = security_inode_need_killpriv(dentry);
1654 int error = 0;
1655
1656 if (killpriv < 0)
1657 return killpriv;
1658 if (killpriv)
1659 error = security_inode_killpriv(dentry);
1660 if (!error && killsuid)
1661 error = __remove_suid(dentry, killsuid);
1662
1663 return error;
1664 }
1665 EXPORT_SYMBOL(remove_suid);
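
/*
 * Example usage (sketch): write paths call remove_suid() before
 * modifying file data, so that setuid/setgid bits cannot survive an
 * unprivileged write:
 *
 *	err = remove_suid(file->f_path.dentry);
 *	if (err)
 *		goto out;
 */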
1666
1667 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1668 const struct iovec *iov, size_t base, size_t bytes)
1669 {
1670 size_t copied = 0, left = 0;
1671
1672 while (bytes) {
1673 char __user *buf = iov->iov_base + base;
1674 int copy = min(bytes, iov->iov_len - base);
1675
1676 base = 0;
1677 left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
1678 copied += copy;
1679 bytes -= copy;
1680 vaddr += copy;
1681 iov++;
1682
1683 if (unlikely(left))
1684 break;
1685 }
1686 return copied - left;
1687 }
1688
1689 /*
1690 * Copy as much as we can into the page and return the number of bytes which
1691 * were successfully copied. If a fault is encountered then return the number of
1692 * bytes which were copied.
1693 */
1694 size_t iov_iter_copy_from_user_atomic(struct page *page,
1695 struct iov_iter *i, unsigned long offset, size_t bytes)
1696 {
1697 char *kaddr;
1698 size_t copied;
1699
1700 BUG_ON(!in_atomic());
1701 kaddr = kmap_atomic(page, KM_USER0);
1702 if (likely(i->nr_segs == 1)) {
1703 int left;
1704 char __user *buf = i->iov->iov_base + i->iov_offset;
1705 left = __copy_from_user_inatomic_nocache(kaddr + offset,
1706 buf, bytes);
1707 copied = bytes - left;
1708 } else {
1709 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1710 i->iov, i->iov_offset, bytes);
1711 }
1712 kunmap_atomic(kaddr, KM_USER0);
1713
1714 return copied;
1715 }
1716 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
1717
1718 /*
1719 * This has the same side effects and return value as
1720 * iov_iter_copy_from_user_atomic().
1721 * The difference is that it attempts to resolve faults.
1722 * Page must not be locked.
1723 */
1724 size_t iov_iter_copy_from_user(struct page *page,
1725 struct iov_iter *i, unsigned long offset, size_t bytes)
1726 {
1727 char *kaddr;
1728 size_t copied;
1729
1730 kaddr = kmap(page);
1731 if (likely(i->nr_segs == 1)) {
1732 int left;
1733 char __user *buf = i->iov->iov_base + i->iov_offset;
1734 left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
1735 copied = bytes - left;
1736 } else {
1737 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1738 i->iov, i->iov_offset, bytes);
1739 }
1740 kunmap(page);
1741 return copied;
1742 }
1743 EXPORT_SYMBOL(iov_iter_copy_from_user);
1744
1745 static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
1746 {
1747 if (likely(i->nr_segs == 1)) {
1748 i->iov_offset += bytes;
1749 } else {
1750 const struct iovec *iov = i->iov;
1751 size_t base = i->iov_offset;
1752
1753 while (bytes) {
1754 int copy = min(bytes, iov->iov_len - base);
1755
1756 bytes -= copy;
1757 base += copy;
1758 if (iov->iov_len == base) {
1759 iov++;
1760 base = 0;
1761 }
1762 }
1763 i->iov = iov;
1764 i->iov_offset = base;
1765 }
1766 }
1767
1768 void iov_iter_advance(struct iov_iter *i, size_t bytes)
1769 {
1770 BUG_ON(i->count < bytes);
1771
1772 __iov_iter_advance_iov(i, bytes);
1773 i->count -= bytes;
1774 }
1775 EXPORT_SYMBOL(iov_iter_advance);
1776
1777 /*
1778 * Fault in the first iovec of the given iov_iter, to a maximum length
1779 * of bytes. Returns 0 on success, or non-zero if the memory could not be
1780 * accessed (i.e. because it is an invalid address).
1781 *
1782 * writev-intensive code may want this to prefault several iovecs -- that
1783 * would be possible (callers must not rely on the fact that _only_ the
1784 * first iovec will be faulted with the current implementation).
1785 */
1786 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
1787 {
1788 char __user *buf = i->iov->iov_base + i->iov_offset;
1789 bytes = min(bytes, i->iov->iov_len - i->iov_offset);
1790 return fault_in_pages_readable(buf, bytes);
1791 }
1792 EXPORT_SYMBOL(iov_iter_fault_in_readable);
1793
1794 /*
1795 * Return the count of just the current iov_iter segment.
1796 */
1797 size_t iov_iter_single_seg_count(struct iov_iter *i)
1798 {
1799 const struct iovec *iov = i->iov;
1800 if (i->nr_segs == 1)
1801 return i->count;
1802 else
1803 return min(i->count, iov->iov_len - i->iov_offset);
1804 }
1805 EXPORT_SYMBOL(iov_iter_single_seg_count);
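
/*
 * Example usage (sketch; "iov", "nr_segs", "count", "written", "page",
 * "offset" and "bytes" are assumed caller context): the iov_iter
 * helpers above are meant to be used together in a copy loop of roughly
 * this shape:
 *
 *	iov_iter_init(&i, iov, nr_segs, count, written);
 *	while (iov_iter_count(&i)) {
 *		if (iov_iter_fault_in_readable(&i, bytes))
 *			break;		(caller returns -EFAULT)
 *		pagefault_disable();
 *		copied = iov_iter_copy_from_user_atomic(page, &i,
 *							offset, bytes);
 *		pagefault_enable();
 *		iov_iter_advance(&i, copied);
 *	}
 */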
1806
1807 /*
1808 * Performs necessary checks before doing a write
1809 *
1810 * Can adjust writing position or amount of bytes to write.
1811 * Returns appropriate error code that caller should return or
1812 * zero in case that write should be allowed.
1813 */
1814 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
1815 {
1816 struct inode *inode = file->f_mapping->host;
1817 unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1818
1819 if (unlikely(*pos < 0))
1820 return -EINVAL;
1821
1822 if (!isblk) {
1823 /* FIXME: this is for backwards compatibility with 2.4 */
1824 if (file->f_flags & O_APPEND)
1825 *pos = i_size_read(inode);
1826
1827 if (limit != RLIM_INFINITY) {
1828 if (*pos >= limit) {
1829 send_sig(SIGXFSZ, current, 0);
1830 return -EFBIG;
1831 }
1832 if (*count > limit - (typeof(limit))*pos) {
1833 *count = limit - (typeof(limit))*pos;
1834 }
1835 }
1836 }
1837
1838 /*
1839 * LFS rule
1840 */
1841 if (unlikely(*pos + *count > MAX_NON_LFS &&
1842 !(file->f_flags & O_LARGEFILE))) {
1843 if (*pos >= MAX_NON_LFS) {
1844 return -EFBIG;
1845 }
1846 if (*count > MAX_NON_LFS - (unsigned long)*pos) {
1847 *count = MAX_NON_LFS - (unsigned long)*pos;
1848 }
1849 }
1850
1851 /*
1852 * Are we about to exceed the fs block limit ?
1853 *
1854 * If we have written data it becomes a short write. If we have
1855 * exceeded without writing data we send a signal and return EFBIG.
1856 * Linus's frestrict idea will clean these up nicely.
1857 */
1858 if (likely(!isblk)) {
1859 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
1860 if (*count || *pos > inode->i_sb->s_maxbytes) {
1861 return -EFBIG;
1862 }
1863 /* zero-length writes at ->s_maxbytes are OK */
1864 }
1865
1866 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
1867 *count = inode->i_sb->s_maxbytes - *pos;
1868 } else {
1869 #ifdef CONFIG_BLOCK
1870 loff_t isize;
1871 if (bdev_read_only(I_BDEV(inode)))
1872 return -EPERM;
1873 isize = i_size_read(inode);
1874 if (*pos >= isize) {
1875 if (*count || *pos > isize)
1876 return -ENOSPC;
1877 }
1878
1879 if (*pos + *count > isize)
1880 *count = isize - *pos;
1881 #else
1882 return -EPERM;
1883 #endif
1884 }
1885 return 0;
1886 }
1887 EXPORT_SYMBOL(generic_write_checks);
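
/*
 * Example usage (sketch): callers pass pos and count by reference and
 * must use the possibly-adjusted values afterwards:
 *
 *	err = generic_write_checks(file, &pos, &count,
 *					S_ISBLK(inode->i_mode));
 *	if (err)
 *		goto out;
 *	if (count == 0)
 *		goto out;
 */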
1888
1889 int pagecache_write_begin(struct file *file, struct address_space *mapping,
1890 loff_t pos, unsigned len, unsigned flags,
1891 struct page **pagep, void **fsdata)
1892 {
1893 const struct address_space_operations *aops = mapping->a_ops;
1894
1895 if (aops->write_begin) {
1896 return aops->write_begin(file, mapping, pos, len, flags,
1897 pagep, fsdata);
1898 } else {
1899 int ret;
1900 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1901 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1902 struct inode *inode = mapping->host;
1903 struct page *page;
1904 again:
1905 page = __grab_cache_page(mapping, index);
1906 *pagep = page;
1907 if (!page)
1908 return -ENOMEM;
1909
1910 if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
1911 /*
1912 * There is no way to resolve a short write situation
1913 * for a !Uptodate page (except by the double copy that
1914 * the caller, generic_perform_write_2copy, performs).
1915 *
1916 * Instead, we have to bring it uptodate here.
1917 */
1918 ret = aops->readpage(file, page);
1919 page_cache_release(page);
1920 if (ret) {
1921 if (ret == AOP_TRUNCATED_PAGE)
1922 goto again;
1923 return ret;
1924 }
1925 goto again;
1926 }
1927
1928 ret = aops->prepare_write(file, page, offset, offset+len);
1929 if (ret) {
1930 unlock_page(page);
1931 page_cache_release(page);
1932 if (pos + len > inode->i_size)
1933 vmtruncate(inode, inode->i_size);
1934 }
1935 return ret;
1936 }
1937 }
1938 EXPORT_SYMBOL(pagecache_write_begin);
1939
1940 int pagecache_write_end(struct file *file, struct address_space *mapping,
1941 loff_t pos, unsigned len, unsigned copied,
1942 struct page *page, void *fsdata)
1943 {
1944 const struct address_space_operations *aops = mapping->a_ops;
1945 int ret;
1946
1947 if (aops->write_end) {
1948 mark_page_accessed(page);
1949 ret = aops->write_end(file, mapping, pos, len, copied,
1950 page, fsdata);
1951 } else {
1952 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1953 struct inode *inode = mapping->host;
1954
1955 flush_dcache_page(page);
1956 ret = aops->commit_write(file, page, offset, offset+len);
1957 unlock_page(page);
1958 mark_page_accessed(page);
1959 page_cache_release(page);
1960
1961 if (ret < 0) {
1962 if (pos + len > inode->i_size)
1963 vmtruncate(inode, inode->i_size);
1964 } else if (ret > 0)
1965 ret = min_t(size_t, copied, ret);
1966 else
1967 ret = copied;
1968 }
1969
1970 return ret;
1971 }
1972 EXPORT_SYMBOL(pagecache_write_end);
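
/*
 * Editor's note: hypothetical helper, compiled out, pairing the two
 * functions above for a kernel-space source buffer (so there are no
 * pagefault concerns).  Assumes pos..pos+len does not cross a page
 * boundary; example_kernel_write() itself is not a kernel API.
 */
#if 0
static int example_kernel_write(struct file *file, loff_t pos,
				const void *buf, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;
	void *fsdata;
	char *kaddr;
	int ret;

	ret = pagecache_write_begin(file, mapping, pos, len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret)
		return ret;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + offset, buf, len);
	kunmap_atomic(kaddr, KM_USER0);

	/* Unlocks and releases the page; returns bytes accepted or -ve */
	ret = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
	return ret < 0 ? ret : 0;
}
#endif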
1973
1974 ssize_t
1975 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
1976 unsigned long *nr_segs, loff_t pos, loff_t *ppos,
1977 size_t count, size_t ocount)
1978 {
1979 struct file *file = iocb->ki_filp;
1980 struct address_space *mapping = file->f_mapping;
1981 struct inode *inode = mapping->host;
1982 ssize_t written;
1983
1984 if (count != ocount)
1985 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
1986
1987 written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
1988 if (written > 0) {
1989 loff_t end = pos + written;
1990 if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
1991 i_size_write(inode, end);
1992 mark_inode_dirty(inode);
1993 }
1994 *ppos = end;
1995 }
1996
1997 /*
1998 * Sync the fs metadata, but not the minor inode changes, and of
1999 * course not the data, as we did direct DMA for the I/O.
2000 * i_mutex is held, which protects generic_osync_inode() from
2001 * livelocking. AIO O_DIRECT ops attempt to sync metadata here.
2002 */
2003 if ((written >= 0 || written == -EIOCBQUEUED) &&
2004 ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2005 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
2006 if (err < 0)
2007 written = err;
2008 }
2009 return written;
2010 }
2011 EXPORT_SYMBOL(generic_file_direct_write);
2012
2013 /*
2014 * Find or create a page at the given pagecache position. Return the locked
2015 * page. This function is specifically for buffered writes.
2016 */
2017 struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
2018 {
2019 int status;
2020 struct page *page;
2021 repeat:
2022 page = find_lock_page(mapping, index);
2023 if (likely(page))
2024 return page;
2025
2026 page = page_cache_alloc(mapping);
2027 if (!page)
2028 return NULL;
2029 status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
2030 if (unlikely(status)) {
2031 page_cache_release(page);
2032 if (status == -EEXIST)
2033 goto repeat;
2034 return NULL;
2035 }
2036 return page;
2037 }
2038 EXPORT_SYMBOL(__grab_cache_page);
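
/*
 * Editor's note: illustrative fragment, compiled out.  The page comes
 * back locked and with an elevated reference count; every caller must
 * drop both once it is done with the page.
 */
#if 0
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	/* ... fill or examine the locked page ... */
	unlock_page(page);
	page_cache_release(page);
#endif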
2039
2040 static ssize_t generic_perform_write_2copy(struct file *file,
2041 struct iov_iter *i, loff_t pos)
2042 {
2043 struct address_space *mapping = file->f_mapping;
2044 const struct address_space_operations *a_ops = mapping->a_ops;
2045 struct inode *inode = mapping->host;
2046 long status = 0;
2047 ssize_t written = 0;
2048
2049 do {
2050 struct page *src_page;
2051 struct page *page;
2052 pgoff_t index; /* Pagecache index for current page */
2053 unsigned long offset; /* Offset into pagecache page */
2054 unsigned long bytes; /* Bytes to write to page */
2055 size_t copied; /* Bytes copied from user */
2056
2057 offset = (pos & (PAGE_CACHE_SIZE - 1));
2058 index = pos >> PAGE_CACHE_SHIFT;
2059 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2060 iov_iter_count(i));
2061
2062 /*
2063 * A non-NULL src_page indicates that we're doing the
2064 * copy via get_user_pages and kmap.
2065 */
2066 src_page = NULL;
2067
2068 /*
2069 * Bring in the user page that we will copy from _first_.
2070 * Otherwise there's a nasty deadlock on copying from the
2071 * same page as we're writing to, without it being marked
2072 * up-to-date.
2073 *
2074 * Not only is this an optimisation, but it is also required
2075 * to check that the address is actually valid, when atomic
2076 * usercopies are used, below.
2077 */
2078 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2079 status = -EFAULT;
2080 break;
2081 }
2082
2083 page = __grab_cache_page(mapping, index);
2084 if (!page) {
2085 status = -ENOMEM;
2086 break;
2087 }
2088
2089 /*
2090 * non-uptodate pages cannot cope with short copies, and we
2091 * cannot take a pagefault with the destination page locked.
2092 * So pin the source page to copy it.
2093 */
2094 if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
2095 unlock_page(page);
2096
2097 src_page = alloc_page(GFP_KERNEL);
2098 if (!src_page) {
2099 page_cache_release(page);
2100 status = -ENOMEM;
2101 break;
2102 }
2103
2104 /*
2105 * Cannot get_user_pages with a page locked for the
2106 * same reason as we can't take a page fault with a
2107 * page locked (as explained below).
2108 */
2109 copied = iov_iter_copy_from_user(src_page, i,
2110 offset, bytes);
2111 if (unlikely(copied == 0)) {
2112 status = -EFAULT;
2113 page_cache_release(page);
2114 page_cache_release(src_page);
2115 break;
2116 }
2117 bytes = copied;
2118
2119 lock_page(page);
2120 /*
2121 * Can't handle the page going uptodate here, because
2122 * that means we would use non-atomic usercopies, which
2123 * zero out the tail of the page, which can cause
2124 * zeroes to become transiently visible. We could just
2125 * use a non-zeroing copy, but the APIs aren't too
2126 * consistent.
2127 */
2128 if (unlikely(!page->mapping || PageUptodate(page))) {
2129 unlock_page(page);
2130 page_cache_release(page);
2131 page_cache_release(src_page);
2132 continue;
2133 }
2134 }
2135
2136 status = a_ops->prepare_write(file, page, offset, offset+bytes);
2137 if (unlikely(status))
2138 goto fs_write_aop_error;
2139
2140 if (!src_page) {
2141 /*
2142 * Must not enter the pagefault handler here, because
2143 * we hold the page lock, so we might recursively
2144 * deadlock on the same lock, or get an ABBA deadlock
2145 * against a different lock, or against the mmap_sem
2146 * (which nests outside the page lock). So increment
2147 * preempt count, and use _atomic usercopies.
2148 *
2149 * The page is uptodate so we are OK to encounter a
2150 * short copy: if unmodified parts of the page are
2151 * marked dirty and written out to disk, it doesn't
2152 * really matter.
2153 */
2154 pagefault_disable();
2155 copied = iov_iter_copy_from_user_atomic(page, i,
2156 offset, bytes);
2157 pagefault_enable();
2158 } else {
2159 void *src, *dst;
2160 src = kmap_atomic(src_page, KM_USER0);
2161 dst = kmap_atomic(page, KM_USER1);
2162 memcpy(dst + offset, src + offset, bytes);
2163 kunmap_atomic(dst, KM_USER1);
2164 kunmap_atomic(src, KM_USER0);
2165 copied = bytes;
2166 }
2167 flush_dcache_page(page);
2168
2169 status = a_ops->commit_write(file, page, offset, offset+bytes);
2170 if (unlikely(status < 0))
2171 goto fs_write_aop_error;
2172 if (unlikely(status > 0)) /* filesystem did partial write */
2173 copied = min_t(size_t, copied, status);
2174
2175 unlock_page(page);
2176 mark_page_accessed(page);
2177 page_cache_release(page);
2178 if (src_page)
2179 page_cache_release(src_page);
2180
2181 iov_iter_advance(i, copied);
2182 pos += copied;
2183 written += copied;
2184
2185 balance_dirty_pages_ratelimited(mapping);
2186 cond_resched();
2187 continue;
2188
2189 fs_write_aop_error:
2190 unlock_page(page);
2191 page_cache_release(page);
2192 if (src_page)
2193 page_cache_release(src_page);
2194
2195 /*
2196 * prepare_write() may have instantiated a few blocks
2197 * outside i_size. Trim these off again. Don't need
2198 * i_size_read because we hold i_mutex.
2199 */
2200 if (pos + bytes > inode->i_size)
2201 vmtruncate(inode, inode->i_size);
2202 break;
2203 } while (iov_iter_count(i));
2204
2205 return written ? written : status;
2206 }
2207
2208 static ssize_t generic_perform_write(struct file *file,
2209 struct iov_iter *i, loff_t pos)
2210 {
2211 struct address_space *mapping = file->f_mapping;
2212 const struct address_space_operations *a_ops = mapping->a_ops;
2213 long status = 0;
2214 ssize_t written = 0;
2215 unsigned int flags = 0;
2216
2217 /*
2218 * Copies from kernel address space cannot fail (NFSD is a big user).
2219 */
2220 if (segment_eq(get_fs(), KERNEL_DS))
2221 flags |= AOP_FLAG_UNINTERRUPTIBLE;
2222
2223 do {
2224 struct page *page;
2225 pgoff_t index; /* Pagecache index for current page */
2226 unsigned long offset; /* Offset into pagecache page */
2227 unsigned long bytes; /* Bytes to write to page */
2228 size_t copied; /* Bytes copied from user */
2229 void *fsdata;
2230
2231 offset = (pos & (PAGE_CACHE_SIZE - 1));
2232 index = pos >> PAGE_CACHE_SHIFT;
2233 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2234 iov_iter_count(i));
2235
2236 again:
2237
2238 /*
2239 * Bring in the user page that we will copy from _first_.
2240 * Otherwise there's a nasty deadlock on copying from the
2241 * same page as we're writing to, without it being marked
2242 * up-to-date.
2243 *
2244 * Not only is this an optimisation, but it is also required
2245 * to check that the address is actually valid, when atomic
2246 * usercopies are used, below.
2247 */
2248 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2249 status = -EFAULT;
2250 break;
2251 }
2252
2253 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2254 &page, &fsdata);
2255 if (unlikely(status))
2256 break;
2257
2258 pagefault_disable();
2259 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2260 pagefault_enable();
2261 flush_dcache_page(page);
2262
2263 status = a_ops->write_end(file, mapping, pos, bytes, copied,
2264 page, fsdata);
2265 if (unlikely(status < 0))
2266 break;
2267 copied = status;
2268
2269 cond_resched();
2270
2271 if (unlikely(copied == 0)) {
2272 /*
2273 * If we were unable to copy any data at all, we must
2274 * fall back to a single segment length write.
2275 *
2276 * If we didn't fall back here, we could livelock
2277 * because not all segments in the iov can be copied at
2278 * once without a pagefault.
2279 */
2280 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2281 iov_iter_single_seg_count(i));
2282 goto again;
2283 }
2284 iov_iter_advance(i, copied);
2285 pos += copied;
2286 written += copied;
2287
2288 balance_dirty_pages_ratelimited(mapping);
2289
2290 } while (iov_iter_count(i));
2291
2292 return written ? written : status;
2293 }
2294
2295 ssize_t
2296 generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2297 unsigned long nr_segs, loff_t pos, loff_t *ppos,
2298 size_t count, ssize_t written)
2299 {
2300 struct file *file = iocb->ki_filp;
2301 struct address_space *mapping = file->f_mapping;
2302 const struct address_space_operations *a_ops = mapping->a_ops;
2303 struct inode *inode = mapping->host;
2304 ssize_t status;
2305 struct iov_iter i;
2306
2307 iov_iter_init(&i, iov, nr_segs, count, written);
2308 if (a_ops->write_begin)
2309 status = generic_perform_write(file, &i, pos);
2310 else
2311 status = generic_perform_write_2copy(file, &i, pos);
2312
2313 if (likely(status >= 0)) {
2314 written += status;
2315 *ppos = pos + status;
2316
2317 /*
2318 * For now, when the user asks for O_SYNC, we'll actually give
2319 * O_DSYNC.
2320 */
2321 if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2322 if (!a_ops->writepage || !is_sync_kiocb(iocb))
2323 status = generic_osync_inode(inode, mapping,
2324 OSYNC_METADATA|OSYNC_DATA);
2325 }
2326 }
2327
2328 /*
2329 * If we get here for O_DIRECT writes then we must have fallen through
2330 * to buffered writes (block instantiation inside i_size). So we sync
2331 * the file data here, to try to honour O_DIRECT expectations.
2332 */
2333 if (unlikely(file->f_flags & O_DIRECT) && written)
2334 status = filemap_write_and_wait(mapping);
2335
2336 return written ? written : status;
2337 }
2338 EXPORT_SYMBOL(generic_file_buffered_write);
2339
2340 static ssize_t
2341 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2342 unsigned long nr_segs, loff_t *ppos)
2343 {
2344 struct file *file = iocb->ki_filp;
2345 struct address_space * mapping = file->f_mapping;
2346 size_t ocount; /* original count */
2347 size_t count; /* after file limit checks */
2348 struct inode *inode = mapping->host;
2349 loff_t pos;
2350 ssize_t written;
2351 ssize_t err;
2352
2353 ocount = 0;
2354 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2355 if (err)
2356 return err;
2357
2358 count = ocount;
2359 pos = *ppos;
2360
2361 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2362
2363 /* We can write back this queue in page reclaim */
2364 current->backing_dev_info = mapping->backing_dev_info;
2365 written = 0;
2366
2367 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2368 if (err)
2369 goto out;
2370
2371 if (count == 0)
2372 goto out;
2373
2374 err = remove_suid(file->f_path.dentry);
2375 if (err)
2376 goto out;
2377
2378 file_update_time(file);
2379
2380 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2381 if (unlikely(file->f_flags & O_DIRECT)) {
2382 loff_t endbyte;
2383 ssize_t written_buffered;
2384
2385 written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2386 ppos, count, ocount);
2387 if (written < 0 || written == count)
2388 goto out;
2389 /*
2390 * direct-io write to a hole: fall through to buffered I/O
2391 * for completing the rest of the request.
2392 */
2393 pos += written;
2394 count -= written;
2395 written_buffered = generic_file_buffered_write(iocb, iov,
2396 nr_segs, pos, ppos, count,
2397 written);
2398 /*
2399 * If generic_file_buffered_write() returned a synchronous error
2400 * then we want to return the number of bytes which were
2401 * direct-written, or the error code if that was zero. Note
2402 * that this differs from normal direct-io semantics, which
2403 * will return -EFOO even if some bytes were written.
2404 */
2405 if (written_buffered < 0) {
2406 err = written_buffered;
2407 goto out;
2408 }
2409
2410 /*
2411 * We need to ensure that the page cache pages are written to
2412 * disk and invalidated to preserve the expected O_DIRECT
2413 * semantics.
2414 */
2415 endbyte = pos + written_buffered - written - 1;
2416 err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
2417 SYNC_FILE_RANGE_WAIT_BEFORE|
2418 SYNC_FILE_RANGE_WRITE|
2419 SYNC_FILE_RANGE_WAIT_AFTER);
2420 if (err == 0) {
2421 written = written_buffered;
2422 invalidate_mapping_pages(mapping,
2423 pos >> PAGE_CACHE_SHIFT,
2424 endbyte >> PAGE_CACHE_SHIFT);
2425 } else {
2426 /*
2427 * We don't know how much we wrote, so just return
2428 * the number of bytes which were direct-written.
2429 */
2430 }
2431 } else {
2432 written = generic_file_buffered_write(iocb, iov, nr_segs,
2433 pos, ppos, count, written);
2434 }
2435 out:
2436 current->backing_dev_info = NULL;
2437 return written ? written : err;
2438 }
2439
2440 ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
2441 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
2442 {
2443 struct file *file = iocb->ki_filp;
2444 struct address_space *mapping = file->f_mapping;
2445 struct inode *inode = mapping->host;
2446 ssize_t ret;
2447
2448 BUG_ON(iocb->ki_pos != pos);
2449
2450 ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
2451 &iocb->ki_pos);
2452
2453 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2454 ssize_t err;
2455
2456 err = sync_page_range_nolock(inode, mapping, pos, ret);
2457 if (err < 0)
2458 ret = err;
2459 }
2460 return ret;
2461 }
2462 EXPORT_SYMBOL(generic_file_aio_write_nolock);
2463
2464 ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2465 unsigned long nr_segs, loff_t pos)
2466 {
2467 struct file *file = iocb->ki_filp;
2468 struct address_space *mapping = file->f_mapping;
2469 struct inode *inode = mapping->host;
2470 ssize_t ret;
2471
2472 BUG_ON(iocb->ki_pos != pos);
2473
2474 mutex_lock(&inode->i_mutex);
2475 ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
2476 &iocb->ki_pos);
2477 mutex_unlock(&inode->i_mutex);
2478
2479 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2480 ssize_t err;
2481
2482 err = sync_page_range(inode, mapping, pos, ret);
2483 if (err < 0)
2484 ret = err;
2485 }
2486 return ret;
2487 }
2488 EXPORT_SYMBOL(generic_file_aio_write);
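
/*
 * Editor's note: hypothetical file_operations, compiled out, showing
 * how a simple filesystem can wire the generic paths straight in, much
 * as ext2 does.  example_file_operations is not a real symbol.
 */
#if 0
static const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
};
#endif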
2489
2490 /*
2491 * Called under i_mutex for writes to S_ISREG files. Returns -EIO if something
2492 * went wrong during pagecache shootdown.
2493 */
2494 static ssize_t
2495 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2496 loff_t offset, unsigned long nr_segs)
2497 {
2498 struct file *file = iocb->ki_filp;
2499 struct address_space *mapping = file->f_mapping;
2500 ssize_t retval;
2501 size_t write_len;
2502 pgoff_t end = 0; /* silence gcc */
2503
2504 /*
2505 * If it's a write, unmap all mappings of the file up-front. This
2506 * will cause any pte dirty bits to be propagated into the pageframes
2507 * for the subsequent filemap_write_and_wait().
2508 */
2509 if (rw == WRITE) {
2510 write_len = iov_length(iov, nr_segs);
2511 end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
2512 if (mapping_mapped(mapping))
2513 unmap_mapping_range(mapping, offset, write_len, 0);
2514 }
2515
2516 retval = filemap_write_and_wait(mapping);
2517 if (retval)
2518 goto out;
2519
2520 /*
2521 * After a write we want buffered reads to be sure to go to disk to get
2522 * the new data. We invalidate clean cached pages in the region we're
2523 * about to write. We do this *before* the write so that we can return
2524 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
2525 */
2526 if (rw == WRITE && mapping->nrpages) {
2527 retval = invalidate_inode_pages2_range(mapping,
2528 offset >> PAGE_CACHE_SHIFT, end);
2529 if (retval)
2530 goto out;
2531 }
2532
2533 retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
2534
2535 /*
2536 * Finally, try again to invalidate clean pages which might have been
2537 * cached by non-direct readahead, or faulted in by get_user_pages()
2538 * if the source of the write was an mmap'ed region of the file
2539 * we're writing. Either one is a pretty crazy thing to do,
2540 * so we don't support it 100%. If this invalidation
2541 * fails, tough, the write still worked...
2542 */
2543 if (rw == WRITE && mapping->nrpages) {
2544 invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
2545 }
2546 out:
2547 return retval;
2548 }
2549
2550 /**
2551 * try_to_release_page() - release old fs-specific metadata on a page
2552 *
2553 * @page: the page which the kernel is trying to free
2554 * @gfp_mask: memory allocation flags (and I/O mode)
2555 *
2556 * The address_space is asked to try to release any data held against
2557 * the page (presumably at page->private). If the release was
2558 * successful, return `1'. Otherwise return zero.
2559 *
2560 * The @gfp_mask argument specifies whether I/O may be performed to release
2561 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
2562 *
2563 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
2564 */
2565 int try_to_release_page(struct page *page, gfp_t gfp_mask)
2566 {
2567 struct address_space * const mapping = page->mapping;
2568
2569 BUG_ON(!PageLocked(page));
2570 if (PageWriteback(page))
2571 return 0;
2572
2573 if (mapping && mapping->a_ops->releasepage)
2574 return mapping->a_ops->releasepage(page, gfp_mask);
2575 return try_to_free_buffers(page);
2576 }
2577
2578 EXPORT_SYMBOL(try_to_release_page);
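
/*
 * Editor's note: illustrative fragment, compiled out, of a
 * reclaim-style caller.  The page must be locked, and GFP_NOIO here
 * forbids starting I/O in order to free the private data.
 */
#if 0
	if (PagePrivate(page) && !try_to_release_page(page, GFP_NOIO))
		goto keep_locked;	/* fs metadata still pinned */
#endif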