/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static kmem_cache_t *mm_cachep;

void free_task(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);
	rt_mutex_debug_task_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
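
/*
 * For orientation (a summary, not new machinery): task_struct is
 * reference-counted via tsk->usage.  put_task_struct() in <linux/sched.h>
 * drops a reference and calls __put_task_struct() when the count hits
 * zero; the struct is then freed via free_task() unless a profiling
 * handler has taken ownership through profile_handoff_task().
 */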

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * We need to allow at least 20 threads to boot a system.
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}
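
/*
 * Worked example (illustrative; THREAD_SIZE and PAGE_SIZE are
 * architecture-dependent): with PAGE_SIZE = 4 KiB and THREAD_SIZE = 8 KiB
 * the divisor is 8 * 2 = 16, so a machine with 131072 pages (512 MiB)
 * gets max_threads = 8192, and RLIMIT_NPROC defaults to 4096.
 */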

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*tsk = *orig;
	tsk->thread_info = ti;
	setup_thread_stack(tsk, orig);

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
	tsk->btrace_seq = 0;
	tsk->splice_pipe = NULL;
	return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
#ifdef arch_dup_mmap
	arch_dup_mmap(mm, oldmm);
#endif
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}
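
/*
 * A note on the ordering in the loop above (descriptive only): each vma
 * is linked into the new mm *before* copy_page_range() runs, so if the
 * copy fails partway through, the error path can simply return and let
 * mmput() -> exit_mmap() tear down everything already linked, including
 * the partially copied vma.  vm_ops->open() is likewise called before
 * the retval check, so that exit_mmap()'s eventual ->close() is always
 * balanced by a matching ->open().
 */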

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct *mm_init(struct mm_struct *mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_waiters = 0;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning the task is a kernel workthread that has transiently adopted
 * a user mm with use_mm, e.g. to do its AIO).  Otherwise returns the mm
 * after bumping up its use count; the caller must release it via
 * mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_BORROWED_MM)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
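
/*
 * Typical caller pattern (illustrative sketch):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect mm, e.g. mm->total_vm ...
 *		mmput(mm);
 *	}
 *
 * Note the pairing: get_task_mm() bumps mm_users, so the release must be
 * mmput(), not mmdrop() (which pairs with mm_count).
 */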

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * on error and success alike.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one: we mmput the new mm_struct before restoring the
 * old one.
 *		Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
		u32 __user *tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}
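
/*
 * Background (describes the glibc/NPTL convention, not a kernel
 * guarantee): the clear_child_tid handling above is what lets
 * pthread_join() sleep instead of poll.  The joiner does FUTEX_WAIT on
 * the TID word registered via CLONE_CHILD_CLEARTID; on thread exit the
 * kernel zeroes that word and issues the FUTEX_WAKE seen above.
 */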

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}
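
/*
 * Summary of the three cases above: kernel threads (current->mm == NULL)
 * get no mm of their own and later borrow one via active_mm; CLONE_VM
 * callers (threads) share the parent's mm by taking an mm_users
 * reference; a plain fork() gets a full copy through dup_mm(), with
 * copy_page_range() write-protecting private pages so the actual copying
 * is deferred to copy-on-write faults.
 */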

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fdset;
	int i;

	/* Find the last open fd */
	for (i = size / (8 * sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i + 1) * 8 * sizeof(long);
	return i;
}
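
/*
 * Example: if only fds 0-5 are open, the scan above stops with i == 0
 * (the first bitmap word is non-empty) and the function returns
 * 1 * 8 * sizeof(long), i.e. the count is rounded up to a whole bitmap
 * word: 32 on 32-bit, 64 on 64-bit.
 */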

static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->free_files = NULL;
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i, expand;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	size = old_fdt->max_fdset;
	open_files = count_open_files(old_fdt);
	expand = 0;

	/*
	 * Check whether we need to allocate a larger fd array or fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > new_fdt->max_fdset) {
		new_fdt->max_fdset = 0;
		expand = 1;
	}
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		expand = 1;
	}

	/* if the old fdset gets grown now, we'll only copy up to "size" fds */
	if (expand) {
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files - 1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fdset > open_files) {
		int left = (new_fdt->max_fdset - open_files) / 8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

out:
	return newf;

out_release:
	free_fdset(new_fdt->close_on_exec, new_fdt->max_fdset);
	free_fdset(new_fdt->open_fds, new_fdt->max_fdset);
	free_fd_array(new_fdt->fd, new_fdt->max_fds);
	kmem_cache_free(files_cachep, newf);
	return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(void)
{
	struct files_struct *files = current->files;
	int rc;

	BUG_ON(!files);

	/* This can race but the race causes us to copy when we don't
	   need to and drop the copy */
	if (atomic_read(&files->count) == 1) {
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		taskstats_tgid_alloc(current->signal);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = 0;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->sched_time = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	exit_thread_group_keys(sig);
	taskstats_tgid_free(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}
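
/*
 * Usage note (userspace convention, offered for context): glibc's NPTL
 * calls set_tid_address() for the initial thread at startup, so that the
 * CLONE_CHILD_CLEARTID machinery in mm_release() above also covers the
 * main thread, whose TID word could not be registered via a clone flag.
 * The return value is simply the caller's PID.
 */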

static inline void rt_mutex_init_task(struct task_struct *p)
{
#ifdef CONFIG_RT_MUTEXES
	spin_lock_init(&p->pi_lock);
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup_delays_binfmt;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->sched_time = 0;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;	/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cpuset;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0;	/* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

	rt_mutex_init_task(p);

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL;	/* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/*
	 * Our parent's execution domain becomes the current domain.
	 * These must match for thread signalling to apply.
	 */

	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork.  Restart if a signal comes in before we add the new process
	 * to its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespace;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	/*
	 * inherit ioprio
	 */
	p->ioprio = current->ioprio;

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			p->signal->session = current->signal->session;
			attach_pid(p, PIDTYPE_PGID, process_group(p));
			attach_pid(p, PIDTYPE_SID, p->signal->session);

			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, p->pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p);	/* blocking */
bad_fork_cleanup_files:
	exit_files(p);	/* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __devinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
	/*
	 * copy_process() returns an ERR_PTR on failure, never NULL, so the
	 * error check must use IS_ERR rather than a NULL test.
	 */
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

static inline int fork_traceflag(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	     unsigned long stack_start,
	     struct pt_regs *regs,
	     unsigned long stack_size,
	     int __user *parent_tidptr,
	     int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	struct pid *pid = alloc_pid();
	long nr;

	if (!pid)
		return -EAGAIN;
	nr = pid->nr;
	if (unlikely(current->ptrace)) {
		trace = fork_traceflag(clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely(trace)) {
			current->ptrace_message = nr;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
				current->ptrace_message = nr;
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
			}
		}
	} else {
		free_pid(pid);
		nr = PTR_ERR(p);
	}
	return nr;
}
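
/*
 * For orientation (architecture code, not part of this file; sketched
 * from the i386 convention): the fork-family syscalls are thin wrappers
 * around do_fork().  Roughly:
 *
 *	fork:	do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 *	vfork:	do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp,
 *			&regs, 0, NULL, NULL);
 *	clone:	do_fork(clone_flags, newsp, &regs, 0,
 *			parent_tidptr, child_tidptr);
 */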

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	struct sighand_struct *sighand = data;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
					SLAB_CTOR_CONSTRUCTOR)
		spin_lock_init(&sighand->siglock);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor, NULL);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}
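
/*
 * Note on SLAB_DESTROY_BY_RCU above: a freed sighand_struct may be
 * reused for a new sighand_struct before an RCU grace period elapses,
 * but the underlying memory is not returned to the page allocator until
 * after one.  Lock-free readers must therefore revalidate, e.g. recheck
 * tsk->sighand after taking siglock; the constructor-initialized siglock
 * is what makes locking a possibly-recycled object safe.
 */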

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread.
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}
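
/*
 * Example of the widening above: unshare(CLONE_NEWNS) is silently
 * treated as unshare(CLONE_NEWNS | CLONE_FS), since a private mount
 * namespace is of little use while root/cwd state is still shared.
 */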

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unshare the namespace structure if it is being shared
 */
static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
{
	struct namespace *ns = current->namespace;

	if ((unshare_flags & CLONE_NEWNS) &&
	    (ns && atomic_read(&ns->count) > 1)) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		*new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
		if (!*new_nsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
 * supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) &&
	    (sigh && atomic_read(&sigh->count) > 1))
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
	if (unshare_flags & CLONE_SYSVSEM)
		return -EINVAL;

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct namespace *ns, *new_ns = NULL;
	struct sighand_struct *sigh, *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_ns;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist) {

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_ns) {
			ns = current->namespace;
			current->namespace = new_ns;
			new_ns = ns;
		}

		if (new_sigh) {
			sigh = current->sighand;
			rcu_assign_pointer(current->sighand, new_sigh);
			new_sigh = sigh;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_ns:
	if (new_ns)
		put_namespace(new_ns);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}
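
/*
 * Note on the control flow above: on success, execution deliberately
 * falls through the bad_unshare_cleanup_* labels.  By then each new_*
 * variable holds the *old* structure that was swapped out (or NULL if
 * that piece was not unshared), so the "cleanup" path doubles as the
 * release of the replaced context.
 */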