/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/init_task.h>
#include <trace/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include "cred-internals.h"

DEFINE_TRACE(sched_process_free);
DEFINE_TRACE(sched_process_exit);
DEFINE_TRACE(sched_process_wait);

#ifndef DDE_LINUX
static void exit_mm(struct task_struct * tsk);

static inline int task_detached(struct task_struct *p)
{
        return p->exit_signal == -1;
}

static void __unhash_process(struct task_struct *p)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (thread_group_leader(p)) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                __get_cpu_var(process_counts)--;
        }
        list_del_rcu(&p->thread_group);
        list_del_init(&p->sibling);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *sighand;

        BUG_ON(!sig);
        BUG_ON(!atomic_read(&sig->count));

        sighand = rcu_dereference(tsk->sighand);
        spin_lock(&sighand->siglock);

        posix_cpu_timers_exit(tsk);
        if (atomic_dec_and_test(&sig->count))
                posix_cpu_timers_exit_group(tsk);
        else {
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
                        wake_up_process(sig->group_exit_task);

                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the task_struct.
                 */
                sig->utime = cputime_add(sig->utime, task_utime(tsk));
                sig->stime = cputime_add(sig->stime, task_stime(tsk));
                sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
                sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
                sig = NULL; /* Marker for the group-leader cleanup below. */
        }

        __unhash_process(tsk);

        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);

        tsk->signal = NULL;
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        if (sig) {
                flush_sigqueue(&sig->shared_pending);
                taskstats_tgid_free(sig);
                /*
                 * Make sure ->signal can't go away under rq->lock,
                 * see account_group_exec_runtime().
                 */
                task_rq_unlock_wait(tsk);
                __cleanup_signal(sig);
        }
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        trace_sched_process_free(tsk);
        put_task_struct(tsk);
}

void release_task(struct task_struct * p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        tracehook_prepare_release_task(p);
        /* don't need to get the RCU readlock here - the process
           is dead and can't be modifying its own credentials */
        atomic_dec(&__task_cred(p)->user->processes);

        proc_flush_task(p);
        write_lock_irq(&tasklist_lock);
        tracehook_finish_release_task(p);
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process. (if it wants notification.)
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
                BUG_ON(task_detached(leader));
                do_notify_parent(leader, leader->exit_signal);
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 *
                 * do_notify_parent() will have marked it self-reaping in
                 * that case.
                 */
                zap_leader = task_detached(leader);

                /*
                 * If the leader is now self-reaping, nobody will ever
                 * wait on it, so it is on us to release it below.
                 */
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }

        write_unlock_irq(&tasklist_lock);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}
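
/*
 * Reaping flow, as an illustrative sketch (comment only, not code from
 * this file): the waiting parent in wait_task_zombie() claims the child
 * by flipping ->exit_state from EXIT_ZOMBIE to EXIT_DEAD with xchg(),
 * drops tasklist_lock, copies the status out to userspace, and only
 * then calls release_task(p), which unhashes the task and frees the
 * task_struct after an RCU grace period via delayed_put_task_struct().
 */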

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
        struct task_struct *p;
        struct pid *sid = NULL;

        p = pid_task(pgrp, PIDTYPE_PGID);
        if (p == NULL)
                p = pid_task(pgrp, PIDTYPE_PID);
        if (p != NULL)
                sid = task_session(p);

        return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if ((p == ignored_task) ||
                    (p->exit_state && thread_group_empty(p)) ||
                    is_global_init(p->real_parent))
                        continue;

                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p))
                        return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return 1;
}

int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
        int retval = 0;
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (!task_is_stopped(p))
                        continue;
                retval = 1;
                break;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return retval;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit: our father is in a different pgrp than
                 * we are and we were the only connection outside.
                 */
                parent = tsk->real_parent;
        else
                /* reparent: our child is in a different pgrp than
                 * we are, and it was the only connection outside.
                 */
                ignored_task = NULL;

        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
        write_lock_irq(&tasklist_lock);

        ptrace_unlink(current);
        /* Reparent to init */
        current->real_parent = current->parent = kthreadd_task;
        list_move_tail(&current->sibling, &current->real_parent->children);

        /* Set the exit signal to SIGCHLD so we signal init on exit */
        current->exit_signal = SIGCHLD;

        if (task_nice(current) < 0)
                set_user_nice(current, 0);
        /* cpus_allowed? */
        /* rt_priority? */
        /* signals? */
        memcpy(current->signal->rlim, init_task.signal->rlim,
               sizeof(current->signal->rlim));

#ifndef DDE_LINUX
        atomic_inc(&init_cred.usage);
        commit_creds(&init_cred);
#endif
        write_unlock_irq(&tasklist_lock);
}

void __set_special_pids(struct pid *pid)
{
        struct task_struct *curr = current->group_leader;
        pid_t nr = pid_nr(pid);

        if (task_session(curr) != pid) {
                change_pid(curr, PIDTYPE_SID, pid);
                set_task_session(curr, nr);
        }
        if (task_pgrp(curr) != pid) {
                change_pid(curr, PIDTYPE_PGID, pid);
                set_task_pgrp(curr, nr);
        }
}

static void set_special_pids(struct pid *pid)
{
        write_lock_irq(&tasklist_lock);
        __set_special_pids(pid);
        write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        sigdelset(&current->blocked, sig);
        if (!current->mm) {
                /* Kernel threads handle their own signals.
                   Let the signal code know it'll be handled, so
                   that they don't get converted to SIGKILL or
                   just silently dropped */
                current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(allow_signal);
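
/*
 * Usage sketch (hypothetical caller, not from this file): a kernel
 * thread that wants to be killable typically unblocks SIGKILL and
 * polls for it in its main loop:
 *
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		do_work();
 */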

int disallow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
        va_list args;
        struct fs_struct *fs;
        sigset_t blocked;

        va_start(args, name);
        vsnprintf(current->comm, sizeof(current->comm), name, args);
        va_end(args);

        /*
         * If we were started as result of loading a module, close all of the
         * user space pages.  We don't need them, and if we didn't close them
         * they would be locked into memory.
         */
        exit_mm(current);
        /*
         * We don't want to have TIF_FREEZE set if the system-wide hibernation
         * or suspend transition begins right now.
         */
        current->flags |= (PF_NOFREEZE | PF_KTHREAD);

        if (current->nsproxy != &init_nsproxy) {
                get_nsproxy(&init_nsproxy);
                switch_task_namespaces(current, &init_nsproxy);
        }
        set_special_pids(&init_struct_pid);
        proc_clear_tty(current);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* Become as one with the init task */

        exit_fs(current);	/* current->fs->count--; */
        fs = init_task.fs;
        current->fs = fs;
        atomic_inc(&fs->count);

        exit_files(current);
        current->files = init_task.files;
        atomic_inc(&current->files->count);

        reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
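
/*
 * Usage sketch (hypothetical, legacy style): drivers that spawned a
 * thread with kernel_thread() called daemonize() first thing in the
 * thread function to shed inherited user resources:
 *
 *	static int my_thread(void *unused)
 *	{
 *		daemonize("my_thread");
 *		allow_signal(SIGKILL);
 *		...
 *	}
 *
 * The kthread API is the modern replacement; kthreadd children never
 * attach user resources in the first place.
 */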

static void close_files(struct files_struct * files)
{
        int i, j;
        struct fdtable *fdt;

        j = 0;

        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.
         */
        fdt = files_fdtable(files);
        for (;;) {
                unsigned long set;
                i = j * __NFDBITS;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds->fds_bits[j++];
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }
}

struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}
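
/*
 * get_files_struct()/put_files_struct() form a reference pair.  A
 * hypothetical sketch of safely peeking at another task's fd table:
 *
 *	struct files_struct *files = get_files_struct(task);
 *	if (files) {
 *		rcu_read_lock();
 *		... inspect files_fdtable(files) ...
 *		rcu_read_unlock();
 *		put_files_struct(files);
 *	}
 */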

void put_files_struct(struct files_struct *files)
{
        struct fdtable *fdt;

        if (atomic_dec_and_test(&files->count)) {
                close_files(files);
                /*
                 * Free the fd and fdset arrays if we expanded them.
                 * If the fdtable was embedded, pass files for freeing
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
        }
}

void reset_files_struct(struct files_struct *files)
{
        struct task_struct *tsk = current;
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
        struct files_struct * files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}

void put_fs_struct(struct fs_struct *fs)
{
        /* No need to hold fs->lock if we are killing it */
        if (atomic_dec_and_test(&fs->count)) {
                path_put(&fs->root);
                path_put(&fs->pwd);
                kmem_cache_free(fs_cachep, fs);
        }
}

void exit_fs(struct task_struct *tsk)
{
        struct fs_struct * fs = tsk->fs;

        if (fs) {
                task_lock(tsk);
                tsk->fs = NULL;
                task_unlock(tsk);
                put_fs_struct(fs);
        }
}

EXPORT_SYMBOL_GPL(exit_fs);

#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, lets find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
        /*
         * If there are other users of the mm and the owner (us) is exiting
         * we need to find a new owner to take on the responsibility.
         */
        if (atomic_read(&mm->mm_users) <= 1)
                return 0;
        if (mm->owner != p)
                return 0;
        return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
        struct task_struct *c, *g, *p = current;

retry:
        if (!mm_need_new_owner(mm, p))
                return;

        read_lock(&tasklist_lock);
        /*
         * Search in the children
         */
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search in the siblings
         */
        list_for_each_entry(c, &p->parent->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search through everything else. We should not get
         * here often
         */
        do_each_thread(g, c) {
                if (c->mm == mm)
                        goto assign_new_owner;
        } while_each_thread(g, c);

        read_unlock(&tasklist_lock);
        /*
         * We found no owner yet mm_users > 1: this implies that we are
         * most likely racing with swapoff (try_to_unuse()) or /proc or
         * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
         */
        mm->owner = NULL;
        return;

assign_new_owner:
        BUG_ON(c == p);
        get_task_struct(c);
        /*
         * The task_lock protects c->mm from changing.
         * We always want mm->owner->mm == mm
         */
        task_lock(c);
        /*
         * Delay read_unlock() till we have the task_lock()
         * to ensure that c does not slip away underneath us
         */
        read_unlock(&tasklist_lock);
        if (c->mm != mm) {
                task_unlock(c);
                put_task_struct(c);
                goto retry;
        }
        mm->owner = c;
        task_unlock(c);
        put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
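
/*
 * Design note on the retry loop above: between read_unlock(&tasklist_lock)
 * and task_lock(c), the candidate may exec or exit and drop the mm, so
 * c->mm is re-checked under task_lock(), which is what keeps ->mm stable;
 * on a mismatch we drop our reference and start the search over.
 */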

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
        struct mm_struct *mm = tsk->mm;
        struct core_state *core_state;

        mm_release(tsk, mm);
        if (!mm)
                return;
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;
                up_read(&mm->mmap_sem);

                self.task = tsk;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * The xchg() above implies a full barrier: the dumper
                 * sees us on its list before we can observe !self.task
                 * below.  If we are the last thread, wake the dumper.
                 */
                if (atomic_dec_and_test(&core_state->nr_threads))
                        complete(&core_state->startup);

                for (;;) {
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        schedule();
                }
                __set_task_state(tsk, TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
        BUG_ON(mm != tsk->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        /* We don't want this task to be frozen prematurely */
        clear_freeze_flag(tsk);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
}
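
/*
 * The coredump handshake above pairs with coredump_finish(): the dumping
 * thread walks core_state->dumper, clears each entry's ->task and wakes
 * the thread, which is what ends the "for (;;)" wait loop in exit_mm().
 */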

/*
 * Return nonzero if @parent's children should reap themselves.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static int ignoring_children(struct task_struct *parent)
{
        int ret;
        struct sighand_struct *psig = parent->sighand;
        unsigned long flags;
        spin_lock_irqsave(&psig->siglock, flags);
        ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
               (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
        spin_unlock_irqrestore(&psig->siglock, flags);
        return ret;
}

/*
 * Detach all tasks we were using ptrace on.
 * Any that need to be release_task'd are put on the @dead list.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
{
        struct task_struct *p, *n;
        int ign = -1;

        list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
                __ptrace_unlink(p);

                if (p->exit_state != EXIT_ZOMBIE)
                        continue;

                /*
                 * If it's a zombie, our attachedness prevented normal
                 * parent notification or self-reaping.  Do notification
                 * now if it would have happened earlier.  If it should
                 * reap itself, add it to the @dead list.  We can't call
                 * release_task() here because we already hold tasklist_lock.
                 *
                 * If it's our own child, there is no notification to do.
                 * But if our normal children self-reap, then this child
                 * was prevented by ptrace and we must reap it now.
                 */
                if (!task_detached(p) && thread_group_empty(p)) {
                        if (!same_thread_group(p->real_parent, parent))
                                do_notify_parent(p, p->exit_signal);
                        else {
                                if (ign < 0)
                                        ign = ignoring_children(parent);
                                if (ign)
                                        p->exit_signal = -1;
                        }
                }

                if (task_detached(p)) {
                        /*
                         * Mark it as in the process of being reaped.
                         */
                        p->exit_state = EXIT_DEAD;
                        list_add(&p->ptrace_entry, dead);
                }
        }
}

/*
 * Finish up exit-time ptrace cleanup.
 *
 * Called without locks.
 */
static void ptrace_exit_finish(struct task_struct *parent,
                               struct list_head *dead)
{
        struct task_struct *p, *n;

        BUG_ON(!list_empty(&parent->ptraced));

        list_for_each_entry_safe(p, n, dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }
}

static void reparent_thread(struct task_struct *p, struct task_struct *father)
{
        if (p->pdeath_signal)
                /* We already hold the tasklist_lock here.  */
                group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

        list_move_tail(&p->sibling, &p->real_parent->children);

        /* If this is a threaded reparent there is no need to
         * notify anyone anything has happened.
         */
        if (same_thread_group(p->real_parent, father))
                return;

        /* We don't want people slaying init.  */
        if (!task_detached(p))
                p->exit_signal = SIGCHLD;

        /* If we'd notified the old parent about this child's death,
         * also notify the new parent.
         */
        if (!ptrace_reparented(p) &&
            p->exit_state == EXIT_ZOMBIE &&
            !task_detached(p) && thread_group_empty(p))
                do_notify_parent(p, p->exit_signal);

        kill_orphaned_pgrp(p, father);
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *thread;

        thread = father;
        while_each_thread(father, thread) {
                if (thread->flags & PF_EXITING)
                        continue;
                if (unlikely(pid_ns->child_reaper == father))
                        pid_ns->child_reaper = thread;
                return thread;
        }

        if (unlikely(pid_ns->child_reaper == father)) {
                write_unlock_irq(&tasklist_lock);
                if (unlikely(pid_ns == &init_pid_ns))
                        panic("Attempted to kill init!");

                zap_pid_ns_processes(pid_ns);
                write_lock_irq(&tasklist_lock);
                /*
                 * We can not clear ->child_reaper or leave it alone.
                 * There may by stealth EXIT_DEAD tasks on ->children,
                 * forget_original_parent() must move them somewhere.
                 */
                pid_ns->child_reaper = init_pid_ns.child_reaper;
        }

        return pid_ns->child_reaper;
}

static void forget_original_parent(struct task_struct *father)
{
        struct task_struct *p, *n, *reaper;
        LIST_HEAD(ptrace_dead);

        write_lock_irq(&tasklist_lock);
        reaper = find_new_reaper(father);
        /*
         * First clean up ptrace if we were using it.
         */
        ptrace_exit(father, &ptrace_dead);

        list_for_each_entry_safe(p, n, &father->children, sibling) {
                p->real_parent = reaper;
                if (p->parent == father) {
                        BUG_ON(p->ptrace);
                        p->parent = p->real_parent;
                }
                reparent_thread(p, father);
        }

        write_unlock_irq(&tasklist_lock);
        BUG_ON(!list_empty(&father->children));

        ptrace_exit_finish(father, &ptrace_dead);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
        int signal;
        void *cookie;

        /*
         * This does two things:
         *
         * A.  Make init inherit all the child processes
         * B.  Check to see if any process groups have become orphaned
         *	as a result of our exiting, and if they have any stopped
         *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */
        forget_original_parent(tsk);
        exit_task_namespaces(tsk);

        write_lock_irq(&tasklist_lock);
        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        /* Let father know we died
         *
         * Thread signals are configurable, but you aren't going to use
         * that to send signals to arbitrary processes.
         * That stops right now.
         *
         * If the parent exec id doesn't match the exec id we saved
         * when we started then we know the parent has changed security
         * domain.
         *
         * If our self_exec id doesn't match our parent_exec_id then
         * we have changed execution domain as these two values started
         * the same after a fork.
         */
        if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
            (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
             tsk->self_exec_id != tsk->parent_exec_id) &&
            !capable(CAP_KILL))
                tsk->exit_signal = SIGCHLD;

        signal = tracehook_notify_death(tsk, &cookie, group_dead);
        if (signal >= 0)
                signal = do_notify_parent(tsk, signal);

        tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

        /* mt-exec, de_thread() is waiting for us */
        if (thread_group_leader(tsk) &&
            tsk->signal->group_exit_task &&
            tsk->signal->notify_count < 0)
                wake_up_process(tsk->signal->group_exit_task);

        write_unlock_irq(&tasklist_lock);

        tracehook_report_death(tsk, signal, cookie, group_dead);

        /* If the process is dead, release it - nobody waits on it */
        if (signal == DEATH_REAP)
                release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long *n = end_of_stack(current);
        unsigned long free;

        while (*n == 0)
                n++;
        free = (unsigned long)n - (unsigned long)end_of_stack(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
                                "left\n",
                                current->comm, free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
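
/*
 * With CONFIG_DEBUG_STACK_USAGE=y the kernel stack is zero-filled at
 * allocation, so scanning from end_of_stack() for the first non-zero
 * word yields a high-water mark; the printk above fires only when a
 * task sets a new record low for free stack.
 */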

NORET_TYPE void do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        profile_task_exit(tsk);

        WARN_ON(atomic_read(&tsk->fs_excl));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        tracehook_report_exit(&code);

        /*
         * We're taking recursive faults here in do_exit. Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                printk(KERN_ALERT
                        "Fixing recursive fault but reboot is needed!\n");
                /*
                 * We can do this unlocked here. The futex code uses
                 * this flag just to verify whether the pi state
                 * cleanup has been done or not. In the worst case it
                 * loops once more. We pretend that the cleanup was
                 * done as there is no way to return. Both the
                 * exit_robust_list() and exit_pi_state_list() code
                 * paths are protected against dead tasks.
                 */
                tsk->flags |= PF_EXITPIDONE;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }

        exit_signals(tsk);  /* sets PF_EXITING */
        /*
         * tsk->flags are checked in the futex code to protect against
         * an exiting task cleaning up the robust pi futexes.
         */
        smp_mb();
        spin_unlock_wait(&tsk->pi_lock);

        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
                                current->comm, task_pid_nr(current),
                                preempt_count());

        acct_update_integrals(tsk);

        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
        if (unlikely(tsk->audit_context))
                audit_free(tsk);

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        exit_mm(tsk);

        if (group_dead)
                acct_process();
        trace_sched_process_exit(tsk);

        exit_sem(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        check_stack_usage();
        exit_thread();
        cgroup_exit(tsk, 1);

        if (group_dead && tsk->signal->leader)
                disassociate_ctty(1);

        module_put(task_thread_info(tsk)->exec_domain->module);
        if (tsk->binfmt)
                module_put(tsk->binfmt->module);

        proc_exit_connector(tsk);
        exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
        /*
         * This must happen late, after the PID is not
         * hashed anymore:
         */
        if (unlikely(!list_empty(&tsk->pi_state_list)))
                exit_pi_state_list(tsk);
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
#endif
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held(tsk);
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
         * or not. In the worst case it loops once more.
         */
        tsk->flags |= PF_EXITPIDONE;

        if (tsk->io_context)
                exit_io_context();

        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);

        preempt_disable();
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
        for (;;)
                cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

#endif /* DDE_LINUX */

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
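
/*
 * Usage sketch (hypothetical module): complete_and_exit() lets a thread
 * signal its creator and terminate without returning through module
 * code that may be unloaded as soon as the completion fires:
 *
 *	static DECLARE_COMPLETION(thread_done);
 *
 *	static int my_thread(void *unused)
 *	{
 *		...
 *		complete_and_exit(&thread_done, 0);
 *	}
 */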

#ifndef DDE_LINUX
SYSCALL_DEFINE1(exit, int, error_code)
{
        do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
        struct signal_struct *sig = current->signal;

        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (signal_group_exit(sig))
                exit_code = sig->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
        do_group_exit((error_code & 0xff) << 8);
        /* NOTREACHED */
        return 0;
}
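
/*
 * Note on the (error_code & 0xff) << 8 encoding used by both exit
 * syscalls: the low byte of ->exit_code carries the fatal signal number
 * (plus 0x80 if a core was dumped) and the next byte the exit status,
 * which is exactly what userspace decodes with WEXITSTATUS()/WTERMSIG().
 */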

static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        struct pid *pid = NULL;
        if (type == PIDTYPE_PID)
                pid = task->pids[type].pid;
        else if (type < PIDTYPE_MAX)
                pid = task->group_leader->pids[type].pid;
        return pid;
}

static int eligible_child(enum pid_type type, struct pid *pid, int options,
                          struct task_struct *p)
{
        int err;

        if (type < PIDTYPE_MAX) {
                if (task_pid_type(p, type) != pid)
                        return 0;
        }

        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
         * A "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD.) */
        if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
            && !(options & __WALL))
                return 0;

        err = security_task_wait(p);
        if (err)
                return err;

        return 1;
}

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
                               int why, int status,
                               struct siginfo __user *infop,
                               struct rusage __user *rusagep)
{
        int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

        put_task_struct(p);
        if (!retval)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval)
                retval = put_user(0, &infop->si_errno);
        if (!retval)
                retval = put_user((short)why, &infop->si_code);
        if (!retval)
                retval = put_user(pid, &infop->si_pid);
        if (!retval)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = put_user(status, &infop->si_status);
        if (!retval)
                retval = pid;
        return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int options,
                            struct siginfo __user *infop,
                            int __user *stat_addr, struct rusage __user *ru)
{
        unsigned long state;
        int retval, status, traced;
        pid_t pid = task_pid_vnr(p);
        uid_t uid = __task_cred(p)->uid;

        if (!likely(options & WEXITED))
                return 0;

        if (unlikely(options & WNOWAIT)) {
                int exit_code = p->exit_code;
                int why, status;

                get_task_struct(p);
                read_unlock(&tasklist_lock);
                if ((exit_code & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status = exit_code >> 8;
                } else {
                        why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status = exit_code & 0x7f;
                }
                return wait_noreap_copyout(p, pid, uid, why,
                                           status, infop, ru);
        }

        /*
         * Try to move the task's state to DEAD
         * only one thread is allowed to do this:
         */
        state = xchg(&p->exit_state, EXIT_DEAD);
        if (state != EXIT_ZOMBIE) {
                BUG_ON(state != EXIT_DEAD);
                return 0;
        }

        traced = ptrace_reparented(p);

        if (likely(!traced)) {
                struct signal_struct *psig;
                struct signal_struct *sig;
                struct task_cputime cputime;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields, because they are only touched by
                 * __exit_signal, which runs with tasklist_lock
                 * write-locked anyway, and so is excluded here.  We do
                 * need to protect the access to p->parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
                 *
                 * We use thread_group_cputime() to get times for the thread
                 * group, which consolidates times for all threads in the
                 * group including the group leader.
                 */
                thread_group_cputime(p, &cputime);
                spin_lock_irq(&p->parent->sighand->siglock);
                psig = p->parent->signal;
                sig = p->signal;
                psig->cutime =
                        cputime_add(psig->cutime,
                        cputime_add(cputime.utime,
                                    sig->cutime));
                psig->cstime =
                        cputime_add(psig->cstime,
                        cputime_add(cputime.stime,
                                    sig->cstime));
                psig->cgtime =
                        cputime_add(psig->cgtime,
                        cputime_add(p->gtime,
                        cputime_add(sig->gtime,
                                    sig->cgtime)));
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                psig->cinblock +=
                        task_io_get_inblock(p) +
                        sig->inblock + sig->cinblock;
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->parent->sighand->siglock);
        }

        /*
         * Now we are sure this task is interesting, and no other
         * thread can reap it because we set its state to EXIT_DEAD.
         */
        read_unlock(&tasklist_lock);

        retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        if (!retval && stat_addr)
                retval = put_user(status, stat_addr);
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop) {
                int why;

                if ((status & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status >>= 8;
                } else {
                        why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status &= 0x7f;
                }
                retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;

        if (traced) {
                write_lock_irq(&tasklist_lock);
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);
                /*
                 * If this is not a detached task, notify the parent.
                 * If it's still not detached after that, don't release
                 * it now.
                 */
                if (!task_detached(p)) {
                        do_notify_parent(p, p->exit_signal);
                        if (!task_detached(p)) {
                                p->exit_state = EXIT_ZOMBIE;
                                p = NULL;
                        }
                }
                write_unlock_irq(&tasklist_lock);
        }
        if (p != NULL)
                release_task(p);

        return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(int ptrace, struct task_struct *p,
                             int options, struct siginfo __user *infop,
                             int __user *stat_addr, struct rusage __user *ru)
{
        int retval, exit_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;

        if (!(options & WUNTRACED))
                return 0;

        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);

        if (unlikely(!task_is_stopped_or_traced(p)))
                goto unlock_sig;

        if (!ptrace && p->signal->group_stop_count > 0)
                /*
                 * A group stop is in progress and this is the group leader.
                 * We won't report until all threads have stopped.
                 */
                goto unlock_sig;

        exit_code = p->exit_code;
        if (!exit_code)
                goto unlock_sig;

        if (!unlikely(options & WNOWAIT))
                p->exit_code = 0;

        /* don't need the RCU readlock here as we're holding a spinlock */
        uid = __task_cred(p)->uid;
unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        pid = task_pid_vnr(p);
        why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
        read_unlock(&tasklist_lock);

        if (unlikely(options & WNOWAIT))
                return wait_noreap_copyout(p, pid, uid,
                                           why, exit_code,
                                           infop, ru);

        retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
        if (!retval && stat_addr)
                retval = put_user((exit_code << 8) | 0x7f, stat_addr);
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop)
                retval = put_user((short)why, &infop->si_code);
        if (!retval && infop)
                retval = put_user(exit_code, &infop->si_status);
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;
        put_task_struct(p);

        BUG_ON(!retval);
        return retval;
}
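
/*
 * The (exit_code << 8) | 0x7f value stored to *stat_addr above is the
 * classic wait status encoding for a stopped child: userspace's
 * WIFSTOPPED() tests for the 0x7f low byte and WSTOPSIG() extracts the
 * signal number from bits 8-15.
 */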

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int options,
                               struct siginfo __user *infop,
                               int __user *stat_addr, struct rusage __user *ru)
{
        int retval;
        pid_t pid;
        uid_t uid;

        if (!unlikely(options & WCONTINUED))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held. */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!unlikely(options & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        uid = __task_cred(p)->uid;
        spin_unlock_irq(&p->sighand->siglock);

        pid = task_pid_vnr(p);
        get_task_struct(p);
        read_unlock(&tasklist_lock);

        if (!infop) {
                retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
                put_task_struct(p);
                if (!retval && stat_addr)
                        retval = put_user(0xffff, stat_addr);
                if (!retval)
                        retval = pid;
        } else {
                retval = wait_noreap_copyout(p, pid, uid,
                                             CLD_CONTINUED, SIGCONT,
                                             infop, ru);
                BUG_ON(retval == 0);
        }

        return retval;
}
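
/*
 * The 0xffff status stored above is the wait encoding for a continued
 * child: it is the value userspace's WIFCONTINUED() tests for.
 */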

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then *@notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct task_struct *parent, int ptrace,
                              struct task_struct *p, int *notask_error,
                              enum pid_type type, struct pid *pid, int options,
                              struct siginfo __user *infop,
                              int __user *stat_addr, struct rusage __user *ru)
{
        int ret = eligible_child(type, pid, options, p);
        if (!ret)
                return ret;

        if (unlikely(ret < 0)) {
                /*
                 * If we have not yet seen any eligible child,
                 * then let this error code replace -ECHILD.
                 * A permission error will give the user a clue
                 * to look for security policy problems, rather
                 * than for mysterious wait bugs.
                 */
                if (*notask_error)
                        *notask_error = ret;
        }

        if (likely(!ptrace) && unlikely(p->ptrace)) {
                /*
                 * This child is hidden by ptrace.
                 * We aren't allowed to see it now, but eventually we will.
                 */
                *notask_error = 0;
                return 0;
        }

        if (p->exit_state == EXIT_DEAD)
                return 0;

        /*
         * We don't reap group leaders with subthreads.
         */
        if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
                return wait_task_zombie(p, options, infop, stat_addr, ru);

        /*
         * It's stopped or running now, so it might
         * later continue, exit, or stop again.
         */
        *notask_error = 0;

        if (task_is_stopped_or_traced(p))
                return wait_task_stopped(ptrace, p, options,
                                         infop, stat_addr, ru);

        return wait_task_continued(p, options, infop, stat_addr, ru);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * *@notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct task_struct *tsk, int *notask_error,
                          enum pid_type type, struct pid *pid, int options,
                          struct siginfo __user *infop, int __user *stat_addr,
                          struct rusage __user *ru)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                /*
                 * Do not consider detached threads.
                 */
                if (!task_detached(p)) {
                        int ret = wait_consider_task(tsk, 0, p, notask_error,
                                                     type, pid, options,
                                                     infop, stat_addr, ru);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
                          enum pid_type type, struct pid *pid, int options,
                          struct siginfo __user *infop, int __user *stat_addr,
                          struct rusage __user *ru)
{
        struct task_struct *p;

        /*
         * Traditionally we see ptrace'd stopped tasks regardless of options.
         */
        options |= WUNTRACED;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(tsk, 1, p, notask_error,
                                             type, pid, options,
                                             infop, stat_addr, ru);
                if (ret)
                        return ret;
        }

        return 0;
}

static long do_wait(enum pid_type type, struct pid *pid, int options,
                    struct siginfo __user *infop, int __user *stat_addr,
                    struct rusage __user *ru)
{
        DECLARE_WAITQUEUE(wait, current);
        struct task_struct *tsk;
        int retval;

        trace_sched_process_wait(pid);

        add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
        /*
         * If there is nothing that can match our criteria just get out.
         * We will clear @retval to zero if we see any child that might later
         * match our criteria, even if we are not able to reap it yet.
         */
        retval = -ECHILD;
        if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
                goto end;

        current->state = TASK_INTERRUPTIBLE;
        read_lock(&tasklist_lock);
        tsk = current;
        do {
                int tsk_result = do_wait_thread(tsk, &retval,
                                                type, pid, options,
                                                infop, stat_addr, ru);
                if (!tsk_result)
                        tsk_result = ptrace_do_wait(tsk, &retval,
                                                    type, pid, options,
                                                    infop, stat_addr, ru);
                if (tsk_result) {
                        /*
                         * tasklist_lock is unlocked and we have a final result.
                         */
                        retval = tsk_result;
                        goto end;
                }

                if (options & __WNOTHREAD)
                        break;
                tsk = next_thread(tsk);
                BUG_ON(tsk->signal != current->signal);
        } while (tsk != current);
        read_unlock(&tasklist_lock);

        if (!retval && !(options & WNOHANG)) {
                retval = -ERESTARTSYS;
                if (!signal_pending(current)) {
                        schedule();
                        goto repeat;
                }
        }

end:
        current->state = TASK_RUNNING;
        remove_wait_queue(&current->signal->wait_chldexit, &wait);
        if (infop) {
                if (retval > 0)
                        retval = 0;
                else {
                        /*
                         * For a WNOHANG return, clear out all the fields
                         * we would set so the user can easily tell the
                         * difference.
                         */
                        if (!retval)
                                retval = put_user(0, &infop->si_signo);
                        if (!retval)
                                retval = put_user(0, &infop->si_errno);
                        if (!retval)
                                retval = put_user(0, &infop->si_code);
                        if (!retval)
                                retval = put_user(0, &infop->si_pid);
                        if (!retval)
                                retval = put_user(0, &infop->si_uid);
                        if (!retval)
                                retval = put_user(0, &infop->si_status);
                }
        }
        return retval;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                infop, int, options, struct rusage __user *, ru)
{
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
                return -EINVAL;
        if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
                return -EINVAL;

        switch (which) {
        case P_ALL:
                type = PIDTYPE_MAX;
                break;
        case P_PID:
                type = PIDTYPE_PID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        case P_PGID:
                type = PIDTYPE_PGID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (type < PIDTYPE_MAX)
                pid = find_get_pid(upid);
        ret = do_wait(type, pid, options, infop, NULL, ru);
        put_pid(pid);

        /* avoid REGPARM breakage on x86: */
        asmlinkage_protect(5, ret, which, upid, infop, options, ru);
        return ret;
}
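
/*
 * Userspace view (illustrative): a call such as
 *
 *	siginfo_t info;
 *	waitid(P_PID, pid, &info, WEXITED | WNOHANG);
 *
 * arrives here with which == P_PID, so type becomes PIDTYPE_PID and the
 * result is reported through infop rather than a wait4-style status word.
 */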

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
{
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;

        if (upid == -1)
                type = PIDTYPE_MAX;
        else if (upid < 0) {
                type = PIDTYPE_PGID;
                pid = find_get_pid(-upid);
        } else if (upid == 0) {
                type = PIDTYPE_PGID;
                pid = get_pid(task_pgrp(current));
        } else /* upid > 0 */ {
                type = PIDTYPE_PID;
                pid = find_get_pid(upid);
        }

        ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
        put_pid(pid);

        /* avoid REGPARM breakage on x86: */
        asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
        return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
        return sys_wait4(pid, stat_addr, options, NULL);
}

#endif
#endif /* DDE_LINUX */