mem-controller gfp-mask fix
diff --git a/mm/filemap.c b/mm/filemap.c
index f4d0cded0e10aa21b02707fcaf99c4cbcafa4f06..5357fcc4643b5a653c82d849690ababaeaec9a0e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,6 +33,7 @@
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
+#include <linux/memcontrol.h>
 #include "internal.h"
 
 /*
@@ -65,7 +66,6 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  *    ->swap_lock		(exclusive_swap_page, others)
  *      ->mapping->tree_lock
- *        ->zone.lock
  *
  *  ->i_mutex
  *  ->i_mmap_lock		(truncate->unmap_mapping_range)
@@ -119,6 +119,7 @@ void __remove_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 
+	mem_cgroup_uncharge_page(page);
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
@@ -185,6 +186,12 @@ static int sync_page(void *word)
 	return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+	sync_page(word);
+	return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:	address space structure to write
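The new sync_page_killable() is the action callback that the killable lock path added further down hands to __wait_on_bit_lock(): it kicks the block layer via sync_page(), then turns a pending fatal signal into -EINTR so the wait can be abandoned. As a rough sketch of how such an action is driven, the generic bit-wait lock loop of this era behaves roughly like the following (a simplified paraphrase, not the literal kernel/wait.c code):

	int wait_on_bit_lock_sketch(wait_queue_head_t *wq, struct wait_bit_queue *q,
				    int (*action)(void *), unsigned mode)
	{
		int ret = 0;

		do {
			prepare_to_wait_exclusive(wq, &q->wait, mode);
			if (test_bit(q->key.bit_nr, q->key.flags)) {
				ret = action(q->key.flags);
				if (ret)	/* e.g. -EINTR from sync_page_killable() */
					break;	/* abandon the wait, bit not taken */
			}
		} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
		finish_wait(wq, &q->wait);
		return ret;
	}

A nonzero return propagates out through __lock_page_killable() to lock_page_killable() callers, which is what lets the read path below bail out.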
@@ -453,8 +460,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 int add_to_page_cache(struct page *page, struct address_space *mapping,
 		pgoff_t offset, gfp_t gfp_mask)
 {
-	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	int error = mem_cgroup_cache_charge(page, current->mm,
+					gfp_mask & ~__GFP_HIGHMEM);
+	if (error)
+		goto out;
 
+	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (error == 0) {
 		write_lock_irq(&mapping->tree_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
@@ -465,10 +476,14 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 			page->index = offset;
 			mapping->nrpages++;
 			__inc_zone_page_state(page, NR_FILE_PAGES);
-		}
+		} else
+			mem_cgroup_uncharge_page(page);
+
 		write_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
-	}
+	} else
+		mem_cgroup_uncharge_page(page);
+out:
 	return error;
 }
 EXPORT_SYMBOL(add_to_page_cache);
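Together with the mem_cgroup_uncharge_page() call added to __remove_from_page_cache() earlier, this gives add_to_page_cache() a strict pairing discipline: the page is charged to the memory controller before the radix-tree insert (with __GFP_HIGHMEM masked off, the same mask radix_tree_preload() requires), each failure path drops the charge it took, and a successfully inserted page is uncharged only when it leaves the cache. The contract this leans on, with prototypes as they appear in the diff and comments reflecting my reading of it:

	/* Charge @page to @mm's cgroup; may reclaim with @gfp_mask, may fail. */
	int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask);

	/* Drop one charge; must balance exactly one successful charge. */
	void mem_cgroup_uncharge_page(struct page *page);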
@@ -522,7 +537,7 @@ static inline void wake_up_page(struct page *page, int bit)
 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -546,7 +561,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
 	if (!TestClearPageLocked(page))
@@ -580,7 +595,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -589,11 +604,19 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+	return __wait_on_bit_lock(page_waitqueue(page), &wait,
+					sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
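__lock_page_killable() is only the slow path. Callers such as the read loop below use a lock_page_killable() wrapper that tries the page-flag bit first, mirroring how lock_page() wraps __lock_page(). That wrapper lives in include/linux/pagemap.h and is not part of this diff; plausibly it looks like this (a sketch following the existing lock_page() pattern):

	static inline int lock_page_killable(struct page *page)
	{
		might_sleep();
		if (TestSetPageLocked(page))	/* bit already held: slow path */
			return __lock_page_killable(page);
		return 0;			/* got the lock uncontended */
	}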
@@ -980,7 +1003,8 @@ page_ok:
 
 page_not_up_to_date:
 	/* Get exclusive access to the page ... */
-	lock_page(page);
+	if (lock_page_killable(page))
+		goto readpage_eio;
 
 	/* Did it get truncated before we got the lock? */
 	if (!page->mapping) {
@@ -1008,7 +1032,8 @@ readpage:
 	}
 
 	if (!PageUptodate(page)) {
-		lock_page(page);
+		if (lock_page_killable(page))
+			goto readpage_eio;
 		if (!PageUptodate(page)) {
 			if (page->mapping == NULL) {
 				/*
@@ -1019,15 +1044,16 @@ readpage:
 				goto find_page;
 			}
 			unlock_page(page);
-			error = -EIO;
 			shrink_readahead_size_eio(filp, ra);
-			goto readpage_error;
+			goto readpage_eio;
 		}
 		unlock_page(page);
 	}
 
 	goto page_ok;
 
+readpage_eio:
+	error = -EIO;
 readpage_error:
 	/* UHHUH! A synchronous read error occurred. Report it */
 	desc->error = error;
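Note the fall-through: readpage_eio sets error = -EIO and runs straight into readpage_error, so a task killed while waiting for the page lock reports the same -EIO as a genuine read failure. Assembled from the hunks above, the error routing now looks like this (condensed sketch; the elided code is unchanged):

	if (lock_page_killable(page))
		goto readpage_eio;	/* fatal signal while waiting */
	/* ... */
readpage_eio:
	error = -EIO;			/* killed and I/O failure converge */
readpage_error:
	desc->error = error;		/* report via the read descriptor */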
@@ -1260,7 +1286,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file *file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;
@@ -1733,7 +1759,11 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
 	const struct iovec *iov = i->iov;
 	size_t base = i->iov_offset;
 
-	while (bytes) {
+	/*
+	 * The !iov->iov_len check ensures we skip over unlikely
+	 * zero-length segments.
+	 */
+	while (bytes || !iov->iov_len) {
 		int copy = min(bytes, iov->iov_len - base);
 
 		bytes -= copy;
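Zero-length segments are legal in a readv()/writev() vector, so the iterator must be able to step over them even after bytes reaches zero; otherwise it can be left parked on an empty segment. A hypothetical vector that triggers the case (buf0 and buf2 are stand-in buffers):

	struct iovec vec[3] = {
		{ .iov_base = buf0, .iov_len = 512 },
		{ .iov_base = NULL, .iov_len = 0   },	/* legal, but empty */
		{ .iov_base = buf2, .iov_len = 512 },
	};

With the old while (bytes) test, advancing by 512 bytes stops with the iterator pointing at vec[1]; a caller that then retries using iov_iter_single_seg_count() sees a zero-length segment and can make no progress.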
@@ -2251,6 +2281,7 @@ again:
 
 		cond_resched();
 
+		iov_iter_advance(i, copied);
 		if (unlikely(copied == 0)) {
 			/*
 			 * If we were unable to copy any data at all, we must
@@ -2264,7 +2295,6 @@ again:
 						iov_iter_single_seg_count(i));
 			goto again;
 		}
-		iov_iter_advance(i, copied);
 		pos += copied;
 		written += copied;
 
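Moving iov_iter_advance() ahead of the copied == 0 test is what makes the zero-length-segment skip above effective: the iterator advances even when nothing was copied, stepping off any empty segment before iov_iter_single_seg_count() is consulted for the retry. The resulting retry loop, condensed (a paraphrase of the surrounding buffered-write code of this era; do_copy_from_user() stands in for the actual copy step, which this diff does not show):

again:
	copied = do_copy_from_user();	/* bytes actually copied from userspace */
	cond_resched();
	iov_iter_advance(i, copied);	/* runs even for copied == 0 */
	if (unlikely(copied == 0)) {
		/* retry with at most one (now non-empty) segment */
		bytes = min(bytes, iov_iter_single_seg_count(i));
		goto again;
	}
	pos += copied;
	written += copied;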