/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct_node(node)		\
		kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
# define free_task_struct(tsk)			\
		kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif

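/*
 * Sizing note (illustrative, not from the original source): the stack
 * allocation above is 2^THREAD_SIZE_ORDER contiguous pages, and on most
 * architectures THREAD_SIZE == PAGE_SIZE << THREAD_SIZE_ORDER.  Assuming
 * 4 KiB pages and THREAD_SIZE_ORDER == 1, each kernel stack (with its
 * thread_info at the base) occupies 8 KiB.
 */
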
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}

void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	account_kernel_stack(tsk->stack, -1);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

/*
 * macro override instead of weak attribute alias, to workaround
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

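/*
 * Worked example (illustrative, architecture-dependent): with 4 KiB pages,
 * an 8 KiB THREAD_SIZE and 4 GiB of RAM (mempages == 1048576), the divisor
 * above is 8 * 8192 / 4096 == 16, so max_threads == 65536 and init's
 * RLIMIT_NPROC defaults to 32768.
 */
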
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			mutex_lock(&mapping->i_mmap_mutex);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			mutex_unlock(&mapping->i_mmap_mutex);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(pol);
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

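/*
 * Usage sketch (not part of this file): the default can be set at boot
 * with a kernel command line parameter such as
 *
 *	coredump_filter=0x23
 *
 * and a running process can adjust its own mask via procfs, e.g.
 *
 *	echo 0x23 > /proc/self/coredump_filter
 *
 * The boot value is parsed by simple_strtoul() above, so hex, octal and
 * decimal forms are all accepted.
 */
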
#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}

int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!alloc_cpumask_var(&mm->cpu_vm_mask_var, GFP_KERNEL))
		return -ENOMEM;

	if (oldmm)
		cpumask_copy(mm_cpumask(mm), mm_cpumask(oldmm));
	else
		memset(mm_cpumask(mm), 0, cpumask_size());
#endif
	return 0;
}

static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ?
		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	atomic_set(&mm->oom_disable_count, 0);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	mm = mm_init(mm, current);
	if (!mm)
		return NULL;

	if (mm_init_cpumask(mm, NULL)) {
		mm_free_pgd(mm);
		free_mm(mm);
		return NULL;
	}

	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	free_cpumask_var(mm->cpu_vm_mask_var);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(mm->pmd_huge_pte);
#endif
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		ksm_exit(mm);
		khugepaged_exit(mm); /* must run before exit_mmap */
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		if (mm->binfmt)
			module_put(mm->binfmt->module);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/*
 * We added or removed a vma mapping the executable. The vmas are only mapped
 * during exec and are not mapped with the mmap system call.
 * Callers must hold down_write() on the mm's mmap_sem for these
 */
void added_exe_file_vma(struct mm_struct *mm)
{
	mm->num_exe_file_vmas++;
}

void removed_exe_file_vma(struct mm_struct *mm)
{
	mm->num_exe_file_vmas--;
	if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
		fput(mm->exe_file);
		mm->exe_file = NULL;
	}
}

void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	if (new_exe_file)
		get_file(new_exe_file);
	if (mm->exe_file)
		fput(mm->exe_file);
	mm->exe_file = new_exe_file;
	mm->num_exe_file_vmas = 0;
}

struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	/* We need mmap_sem to protect against races with removal of
	 * VM_EXECUTABLE vmas */
	down_read(&mm->mmap_sem);
	exe_file = mm->exe_file;
	if (exe_file)
		get_file(exe_file);
	up_read(&mm->mmap_sem);
	return exe_file;
}

static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	/* It's safe to write the exe_file pointer without exe_file_lock because
	 * this is called during fork when the task is not yet in /proc */
	newmm->exe_file = get_mm_exe_file(oldmm);
	newmm->num_exe_file_vmas = 0;
}

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

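/*
 * Usage sketch (illustrative caller, not from this file): a typical
 * /proc-style reader pins the mm before walking it and drops the
 * reference with mmput() when done:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		... inspect mm->mmap ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */
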
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise.  Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->flags & PF_SIGNALED) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}
}

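/*
 * Userspace view (illustrative, not from this file): NPTL creates threads
 * with CLONE_CHILD_CLEARTID pointing at the thread's tid field, so the
 * zeroing plus FUTEX_WAKE above is what lets pthread_join() simply wait
 * on that field.  Roughly (glibc clone() wrapper, x86-64 argument order):
 *
 *	pid_t tid;	// shared with the joiner
 *	clone(fn, stack, CLONE_VM | CLONE_THREAD | CLONE_SIGHAND |
 *	      CLONE_CHILD_CLEARTID, arg, NULL, NULL, &tid);
 *	// joiner: while (tid != 0) futex(&tid, FUTEX_WAIT, tid, ...);
 */
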
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	mm->pmd_huge_pte = NULL;
#endif

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (mm_init_cpumask(mm, oldmm))
		goto fail_nocpumask;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	free_cpumask_var(mm->cpu_vm_mask_var);

fail_nocpumask:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;
	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
		atomic_inc(&mm->oom_disable_count);

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		tsk->io_context = ioc_task_link(ioc);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;

		tsk->io_context->ioprio = ioc->ioprio;
	}
#endif
	return 0;
}

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	/* Thread group counters. */
	thread_group_cputime_init(sig);

	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
		sig->cputimer.running = 1;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->threadgroup_fork_lock);
#endif

	sig->oom_adj = current->signal->oom_adj;
	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
	mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */

/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = cputime_zero;
	tsk->cputime_expires.virt_exp = cputime_zero;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

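	/*
	 * Illustrative flag combinations for the checks above (not from the
	 * original source): clone(CLONE_THREAD) alone fails with -EINVAL
	 * because CLONE_THREAD requires CLONE_SIGHAND, which in turn
	 * requires CLONE_VM; the minimal thread-creating combination is
	 * CLONE_VM | CLONE_SIGHAND | CLONE_THREAD.
	 */
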
	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_lock(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

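	/*
	 * Example (illustrative): fork() reaches here with
	 * clone_flags == SIGCHLD, so exit_signal above becomes SIGCHLD and
	 * the parent is notified on exit; a CLONE_THREAD child gets -1 and
	 * reports only through its thread group.
	 */
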
	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_unlock(current);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		task_lock(p);
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			atomic_dec(&p->mm->oom_disable_count);
		task_unlock(p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_unlock(current);
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node); /* not really needed */
		links[type].pid = &init_struct_pid;
	}
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
			    &init_struct_pid, 0);
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		wake_up_new_task(p);

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

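/*
 * Usage sketch (illustrative): vfork() reaches this path as
 * do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, ...), so the parent blocks
 * on the completion above until the child calls exec*() or _exit(),
 * which fires complete() via mm_release().
 */
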
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
}

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing to
	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
	 * needs to unshare vm.
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		/* FIXME: get_task_mm() increments ->mm_users */
		if (atomic_read(&current->mm->mm_users) > 1)
			return -EINVAL;
	}

	return 0;
}

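/*
 * Usage sketch (illustrative): a single-threaded process can detach its
 * mount namespace and filesystem state with
 *
 *	unshare(CLONE_NEWNS);	// implies CLONE_FS, see sys_unshare() below
 *
 * while unshare(CLONE_THREAD) from a multi-threaded process fails the
 * mm_users check above with -EINVAL.
 */
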
/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_out;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */

int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task->files = copy;
	return 0;
}
