fs/hugetlbfs/inode.c
/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>	/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	kuid_t		uid;
	kgid_t		gid;
	umode_t		mode;
	long		nr_blocks;
	long		nr_inodes;
	struct hstate	*hstate;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_err,	NULL},
};
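
/*
 * Illustrative only (not part of the original file): the tokens above map
 * onto a mount invocation such as
 *
 *	mount -t hugetlbfs -o size=512M,nr_inodes=64,mode=01777,\
 *	      uid=1000,gid=1000,pagesize=2M none /mnt/huge
 *
 * where the numeric values are made-up examples; "size" may also be given
 * as a percentage of the huge page pool (e.g. size=50%), as parsed in
 * hugetlbfs_parse_options() below.
 */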

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range. If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	hugetlb_prefault_arch_hook(vma->vm_mm);
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}
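
/*
 * Userspace sketch (assumption, not part of this file): a minimal caller
 * of the mmap path above. The path /mnt/huge/buf is a hypothetical file on
 * a hugetlbfs mount; the length must be a multiple of the huge page size.
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0644);
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The file offset (last mmap argument) must also be huge-page aligned, or
 * the vm_pgoff check above returns -EINVAL.
 */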

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			char __user *buf, unsigned long count,
			unsigned long size)
{
	char *kaddr;
	unsigned long left, copied = 0;
	int i, chunksize;

	if (size > count)
		size = count;

	/* Find which 4k chunk and the offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		kaddr = kmap(&page[i]);
		left = __copy_to_user(buf, kaddr + offset, chunksize);
		kunmap(&page[i]);
		if (left) {
			copied += (chunksize - left);
			break;
		}
		offset = 0;
		size -= chunksize;
		buf += chunksize;
		copied += chunksize;
		i++;
	}
	return copied ? copied : -EFAULT;
}
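
/*
 * Worked example (illustrative numbers): on x86-64 with 2 MB huge pages
 * and 4 KB base pages, a huge page is a compound of 512 subpages. A copy
 * of 10000 bytes starting at offset 6000 begins in subpage i = 1
 * (6000 >> 12) at offset 1904 (6000 & 4095), copies 4096 - 1904 = 2192
 * bytes there, then 4096 bytes from subpage 2 and the final 3712 bytes
 * from subpage 3.
 */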

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's very similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
			      size_t len, loff_t *ppos)
{
	struct hstate *h = hstate_file(filp);
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = *ppos >> huge_page_shift(h);
	unsigned long offset = *ppos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	/* validate length */
	if (len == 0)
		goto out;

	for (;;) {
		struct page *page;
		unsigned long nr, ret;
		int ra;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			goto out;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			ret = len < nr ? len : nr;
			if (clear_user(buf, ret))
				ra = -EFAULT;
			else
				ra = 0;
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to the user space buffer.
			 */
			ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
			ret = ra;
			page_cache_release(page);
		}
		if (ra < 0) {
			if (retval == 0)
				retval = ra;
			goto out;
		}

		offset += ret;
		retval += ret;
		len -= ret;
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);

		/* short read or no more work */
		if ((ret != nr) || (len == 0))
			break;
	}
out:
	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	truncate_hugepages(inode, 0);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* The root inode has no resv_map, so check before releasing. */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < pgoff)
			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		unmap_hugepage_range(vma, vma->vm_start + v_offset,
				     vma->vm_end, NULL);
	}
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	i_mmap_unlock_write(mapping);
	truncate_hugepages(inode, offset);
	return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
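
/*
 * Userspace sketch (assumption, not part of this file): setattr enforces
 * huge-page alignment on size changes, so with 2 MB huge pages
 *
 *	ftruncate(fd, 4 * 1024 * 1024);	// OK, multiple of 2 MB
 *	ftruncate(fd, 3 * 1024 * 1024);	// fails with -EINVAL
 *
 * because attr->ia_size & ~huge_page_mask(h) is non-zero in the second
 * call.
 */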

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->private_data = resv_map;
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode, because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(); later, when we
		 * call mpol_free_shared_policy(), it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * Mark the head page dirty; dirtiness is tracked per huge page, not per
 * base-page-sized subpage.
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(dentry->d_inode);

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read			= hugetlbfs_read,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long size = 0;
	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			size = memparse(args[0].from, &rest);
			setsize = SIZE_STD;
			if (*rest == '%')
				setsize = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/* Do size after hstate is set up */
	if (setsize > NO_SIZE) {
		struct hstate *h = pconfig->hstate;
		if (setsize == SIZE_PERCENT) {
			size <<= huge_page_shift(h);
			size *= h->max_huge_pages;
			do_div(size, 100);
		}
		pconfig->nr_blocks = (size >> huge_page_shift(h));
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}
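
/*
 * Worked example (illustrative numbers): with 2 MB huge pages
 * (huge_page_shift == 21) and a pool of max_huge_pages = 1024,
 * "size=50%" is computed above as
 *
 *	size = 50 << 21;		// 50 huge pages' worth of bytes
 *	size *= 1024;			// scale by the pool size
 *	do_div(size, 100);		// 50% of the pool, in bytes
 *	nr_blocks = size >> 21;		// = 512 huge pages
 *
 * i.e. the mount is capped at half of the configured huge page pool.
 */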

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.nr_blocks = -1;	/* No limit on size by default */
	config.nr_inodes = -1;	/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	if (config.nr_blocks != -1) {
		sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages() reserves one hugepage less
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
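
/*
 * Userspace sketch (assumption, not part of this file): hugetlb_file_setup()
 * backs SysV shared memory created with SHM_HUGETLB, roughly:
 *
 *	int id = shmget(IPC_PRIVATE, 4 * 1024 * 1024,
 *			IPC_CREAT | SHM_HUGETLB | 0600);
 *	void *p = shmat(id, NULL, 0);
 *
 * The 4 MB size here is a made-up example and, per the comment above
 * hugetlb_file_setup(), should be a multiple of the huge page size.
 */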

static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	struct hstate *h;
	int i;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	i = 0;
	for_each_hstate(h)
		kern_unmount(hugetlbfs_vfsmount[i++]);
	unregister_filesystem(&hugetlbfs_fs_type);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");