/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE	(PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE	(ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE		(PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX		(SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES		((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

#define VM_ACCT(size)		(PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	VM_READ
#define SHMEM_TRUNCATE	VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
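/*
 * Worked example of the size-limit arithmetic above (an illustration added
 * here, not part of the original source or the build): with 4KiB pages and
 * 8-byte longs, ENTRIES_PER_PAGE is 512, and half of the top-level index
 * page addresses doubly indirect blocks while the other half addresses
 * triply indirect ones, hence the (ENTRIES_PER_PAGEPAGE/2) *
 * (ENTRIES_PER_PAGE+1) term.  SHMEM_NR_DIRECT is taken as 16 here, as in
 * the layout example further down this file.
 */
#if 0	/* illustration only, never built */
	unsigned long long epp  = 4096 / 8;		/* ENTRIES_PER_PAGE = 512 */
	unsigned long long eppp = epp * epp;		/* ENTRIES_PER_PAGEPAGE = 262144 */
	unsigned long long max_index = 16 + (eppp / 2) * (epp + 1);
	/* max_index is ~6.7e7 pages; shifted by 12 that is roughly 256GiB */
#endif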
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT) ?
		security_vm_enough_memory(VM_ACCT(size)) : 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT) ?
		0 : security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
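/*
 * Illustration of the two accounting schemes above (added commentary, not
 * from the original source): VM_ACCT rounds a byte count up to whole pages
 * before charging, so with 4KiB pages a 5000-byte object is charged as 2
 * pages.  Objects created via shmem_file_setup() carry VM_ACCOUNT and are
 * charged up front by shmem_acct_size(); ordinary tmpfs files do not, and
 * are charged one page at a time by shmem_acct_block() as they grow, which
 * is what allows huge sparse files.
 */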
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);
static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
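/*
 * Worked example (added illustration, not from the original source): if 10
 * pages were allocated (info->alloced == 10), 3 of them are out on swap
 * (info->swapped == 3) and the page cache now holds only 5 (nrpages == 5),
 * then the mm has silently dropped 10 - 3 - 5 = 2 clean hole pages, so 2
 * blocks are unaccounted and returned to the superblock's free count.
 */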
/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         NULL.
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	       	       +-> 44-47
 * 	      	       +-> 48-51
 * 	      	       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
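/*
 * Worked example of the lookup above (added illustration, not from the
 * original source): with the artificial ENTRIES_PER_PAGE = 4 and
 * SHMEM_NR_DIRECT = 16 layout in the comment, page index 30 resolves as:
 *
 *	index  = 30 - 16 = 14	(past the direct entries)
 *	offset = 14 % 4  = 2	(slot within the final block)
 *	index  = 14 / 4  = 3	(which index block)
 *
 * Since 3 >= ENTRIES_PER_PAGE/2, it is triply indirect: index becomes 1
 * and dir advances 2 + 0 slots to dir2; the second entry of dir2 is the
 * "28-31" block, whose slot 2 holds page 30's swap entry.
 */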
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}
/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}
static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}
static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}
static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	error = add_to_page_cache(page, inode->i_mapping, idx, GFP_ATOMIC);
	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}
/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for pdflush or sync.  However, in those cases, we do still
	 * want to check if there's a redundant swappage to be discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		swap_duplicate(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		set_page_dirty(page);
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
		if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			*policy_nodes = node_states[N_HIGH_MEMORY];
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}
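/*
 * For illustration (added commentary, not from the original source): the
 * strings this parser sees come from the "mpol=" tmpfs mount option, e.g.
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=prefer:1 tmpfs /mnt
 *	mount -t tmpfs -o mpol=default tmpfs /mnt
 *
 * "default" rejects a nodelist, "prefer" insists on a single node, "bind"
 * insists on a nodelist, and "interleave" falls back to all online nodes
 * with memory when no nodelist is given.
 */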
static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	page = swapin_readahead(entry, gfp, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	page = alloc_page_vma(gfp, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else /* !CONFIG_NUMA */
static inline int shmem_parse_mpol(char *value, int *policy,
						nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, and we also free the swap
 * entry, since a page cannot live in both the swap and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache(
				swappage, mapping, idx, GFP_ATOMIC))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	mark_page_accessed(vmf->page);
	return ret | VM_FAULT_LOCKED;
}
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy, sbinfo->policy,
							&sbinfo->policy_nodes);
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
						NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	unlock_page(page);
	set_page_dirty(page);
	page_cache_release(page);

	return copied;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		unlock_page(page);
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};
#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */
static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size,
					  -EOPNOTSUPP);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	if (fh_len < 3)
		return NULL;

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
static int shmem_parse_options(char *options, int *mode, uid_t *uid,
	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
	int *policy, nodemask_t *policy_nodes)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (shmem_parse_mpol(value,policy,policy_nodes))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode   = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;
	int policy = MPOL_DEFAULT;
	nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
					&inodes, &policy, &policy_nodes))
			return -EINVAL;
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	shmem_acl_destroy_inode(inode);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(struct kmem_cache *cachep, void *foo)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
	p->i_acl = NULL;
	p->i_default_acl = NULL;
#endif
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, init_once);
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
	.readpage	= shmem_readpage,
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
	.migratepage	= migrate_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.fsync		= simple_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};

static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *shm_mnt;
static int __init init_tmpfs(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)
/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name:  name for dentry (to be seen in /proc/<pid>/maps)
 * @size:  size to be set for the file
 * @flags: vm_flags
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
			&shmem_file_operations);
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
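/*
 * Typical usage (a sketch added for illustration, not from the original
 * source): shmem_zero_setup() below is the canonical caller, but any
 * kernel code wanting an unlinked, swap-backed file can do the same:
 *
 *	struct file *file = shmem_file_setup("dev/example", 1 << 20, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...use the 1MiB file through the page cache, then fput(file)...
 *
 * Passing VM_ACCOUNT in flags makes shmem_acct_size() pre-charge the whole
 * size against the overcommit accounting, as described near the top of
 * this file.
 */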
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}