| author | Dmitry Torokhov <dtor_core@ameritech.net> | 2006-06-26 01:31:38 -0400 |
|---|---|---|
| committer | Dmitry Torokhov <dtor_core@ameritech.net> | 2006-06-26 01:31:38 -0400 |
| commit | 4854c7b27f0975a2b629f35ea3996d2968eb7c4f (patch) | |
| tree | 4102bdb70289764a2058aff0f907b13d7cf0e0d1 /kernel/sys.c | |
| parent | 3cbd5b32cb625f5c0f1b1476d154fac873dd49ce (diff) | |
| parent | fcc18e83e1f6fd9fa6b333735bf0fcd530655511 (diff) | |
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'kernel/sys.c')
| -rw-r--r-- | kernel/sys.c | 80 |
1 files changed, 40 insertions, 40 deletions
diff --git a/kernel/sys.c b/kernel/sys.c
index 0b6ec0e7936f..2d5179c67cec 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -13,7 +13,6 @@
 #include <linux/notifier.h>
 #include <linux/reboot.h>
 #include <linux/prctl.h>
-#include <linux/init.h>
 #include <linux/highuid.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
@@ -57,6 +56,12 @@
 #ifndef GET_FPEXC_CTL
 # define GET_FPEXC_CTL(a,b)	(-EINVAL)
 #endif
+#ifndef GET_ENDIAN
+# define GET_ENDIAN(a,b)	(-EINVAL)
+#endif
+#ifndef SET_ENDIAN
+# define SET_ENDIAN(a,b)	(-EINVAL)
+#endif
 
 /*
  * this is where the system-wide overflow UID and GID are defined, for
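The `#ifndef` fallbacks above follow the usual opt-in pattern for arch-specific prctl helpers: an architecture that supports the operation defines GET_ENDIAN/SET_ENDIAN itself in a header pulled in before this point, and every other architecture silently falls back to -EINVAL. A rough sketch of the arch side, with the header location and helper names assumed for illustration rather than taken from this diff:

```c
/* Hypothetical arch header (location and helper names are assumptions).
 * If the architecture provides these macros, the #ifndef fallbacks in
 * kernel/sys.c above are skipped and the prctl options become functional.
 */
extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

#define GET_ENDIAN(tsk, adr)	get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val)	set_endian((tsk), (val))
```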
@@ -132,14 +137,15 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
 		unsigned long val, void *v)
 {
 	int ret = NOTIFY_DONE;
-	struct notifier_block *nb;
+	struct notifier_block *nb, *next_nb;
 
 	nb = rcu_dereference(*nl);
 	while (nb) {
+		next_nb = rcu_dereference(nb->next);
 		ret = nb->notifier_call(nb, val, v);
 		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
 			break;
-		nb = rcu_dereference(nb->next);
+		nb = next_nb;
 	}
 	return ret;
 }
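The notifier_call_chain() change above loads nb->next before invoking the callback, so the walker no longer touches nb after the handler has run, which matters if the handler removes its own notifier_block from the chain. For context, this walker drives chains such as reboot_notifier_list used further down in this file; a minimal consumer looks roughly like the sketch below (the example_* names are illustrative only):

```c
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Illustrative reboot-chain handler: invoked via notifier_call_chain()
 * when kernel_restart_prepare()/kernel_shutdown_prepare() walk the chain.
 */
static int example_reboot_notify(struct notifier_block *nb,
				 unsigned long event, void *cmd)
{
	printk(KERN_INFO "example: shutdown/restart event %lu\n", event);
	return NOTIFY_DONE;	/* NOTIFY_STOP would end the walk early */
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_notify,
};

/* registration from some init path (illustrative):
 *	register_reboot_notifier(&example_reboot_nb);
 */
```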
@@ -583,7 +589,7 @@ void emergency_restart(void)
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
 
-void kernel_restart_prepare(char *cmd)
+static void kernel_restart_prepare(char *cmd)
 {
 	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 	system_state = SYSTEM_RESTART;
@@ -617,7 +623,7 @@ EXPORT_SYMBOL_GPL(kernel_restart);
  * Move into place and start executing a preloaded standalone
  * executable. If nothing was preloaded return an error.
  */
-void kernel_kexec(void)
+static void kernel_kexec(void)
 {
 #ifdef CONFIG_KEXEC
 	struct kimage *image;
@@ -631,7 +637,6 @@ void kernel_kexec(void)
 	machine_kexec(image);
 #endif
 }
-EXPORT_SYMBOL_GPL(kernel_kexec);
 
 void kernel_shutdown_prepare(enum system_states state)
 {
@@ -1860,23 +1865,20 @@ out:
  * fields when reaping, so a sample either gets all the additions of a
  * given child after it's reaped, or none so this sample is before reaping.
  *
- * tasklist_lock locking optimisation:
- * If we are current and single threaded, we do not need to take the tasklist
- * lock or the siglock. No one else can take our signal_struct away,
- * no one else can reap the children to update signal->c* counters, and
- * no one else can race with the signal-> fields.
- * If we do not take the tasklist_lock, the signal-> fields could be read
- * out of order while another thread was just exiting. So we place a
- * read memory barrier when we avoid the lock. On the writer side,
- * write memory barrier is implied in __exit_signal as __exit_signal releases
- * the siglock spinlock after updating the signal-> fields.
- *
- * We don't really need the siglock when we access the non c* fields
- * of the signal_struct (for RUSAGE_SELF) even in multithreaded
- * case, since we take the tasklist lock for read and the non c* signal->
- * fields are updated only in __exit_signal, which is called with
- * tasklist_lock taken for write, hence these two threads cannot execute
- * concurrently.
+ * Locking:
+ * We need to take the siglock for CHILDEREN, SELF and BOTH
+ * for the cases current multithreaded, non-current single threaded
+ * non-current multithreaded. Thread traversal is now safe with
+ * the siglock held.
+ * Strictly speaking, we donot need to take the siglock if we are current and
+ * single threaded, as no one else can take our signal_struct away, no one
+ * else can reap the children to update signal->c* counters, and no one else
+ * can race with the signal-> fields. If we do not take any lock, the
+ * signal-> fields could be read out of order while another thread was just
+ * exiting. So we should place a read memory barrier when we avoid the lock.
+ * On the writer side, write memory barrier is implied in __exit_signal
+ * as __exit_signal releases the siglock spinlock after updating the signal->
+ * fields. But we don't do this yet to keep things simple.
  *
  */
 
@@ -1885,35 +1887,25 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	struct task_struct *t;
 	unsigned long flags;
 	cputime_t utime, stime;
-	int need_lock = 0;
 
 	memset((char *) r, 0, sizeof *r);
 	utime = stime = cputime_zero;
 
-	if (p != current || !thread_group_empty(p))
-		need_lock = 1;
-
-	if (need_lock) {
-		read_lock(&tasklist_lock);
-		if (unlikely(!p->signal)) {
-			read_unlock(&tasklist_lock);
-			return;
-		}
-	} else
-		/* See locking comments above */
-		smp_rmb();
+	rcu_read_lock();
+	if (!lock_task_sighand(p, &flags)) {
+		rcu_read_unlock();
+		return;
+	}
 
 	switch (who) {
 		case RUSAGE_BOTH:
 		case RUSAGE_CHILDREN:
-			spin_lock_irqsave(&p->sighand->siglock, flags);
 			utime = p->signal->cutime;
 			stime = p->signal->cstime;
 			r->ru_nvcsw = p->signal->cnvcsw;
 			r->ru_nivcsw = p->signal->cnivcsw;
 			r->ru_minflt = p->signal->cmin_flt;
 			r->ru_majflt = p->signal->cmaj_flt;
-			spin_unlock_irqrestore(&p->sighand->siglock, flags);
 
 			if (who == RUSAGE_CHILDREN)
 				break;
@@ -1941,8 +1933,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 			BUG();
 	}
 
-	if (need_lock)
-		read_unlock(&tasklist_lock);
+	unlock_task_sighand(p, &flags);
+	rcu_read_unlock();
+
 	cputime_to_timeval(utime, &r->ru_utime);
 	cputime_to_timeval(stime, &r->ru_stime);
 }
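The two k_getrusage() hunks above replace the need_lock/tasklist_lock logic with lock_task_sighand() under the RCU read lock; a NULL return means the target task has already lost its sighand (it is being reaped), so the sampler simply gives up. The skeleton of that pattern, pulled out of context (function name, error value, and the particular fields read are illustrative, not part of this commit):

```c
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/errno.h>

/* Sketch of the locking pattern used in k_getrusage() above, not a drop-in:
 * hold the RCU read lock so p->sighand cannot go away under us, then pin
 * the siglock via lock_task_sighand(); NULL means the task is past exit.
 */
static int example_sample_counters(struct task_struct *p, struct rusage *r)
{
	unsigned long flags;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return -ESRCH;			/* illustrative error choice */
	}

	r->ru_nvcsw = p->signal->cnvcsw;	/* example reads under siglock */
	r->ru_nivcsw = p->signal->cnivcsw;

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();
	return 0;
}
```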
@@ -2057,6 +2050,13 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 				return -EFAULT;
 			return 0;
 		}
+		case PR_GET_ENDIAN:
+			error = GET_ENDIAN(current, arg2);
+			break;
+		case PR_SET_ENDIAN:
+			error = SET_ENDIAN(current, arg2);
+			break;
+
 		default:
 			error = -EINVAL;
 			break;
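With the PR_GET_ENDIAN/PR_SET_ENDIAN cases wired into sys_prctl() above, userspace can query or change the per-process endian mode on architectures that provide GET_ENDIAN/SET_ENDIAN; everywhere else both options return -EINVAL via the fallback macros added earlier in this diff. A hedged userspace sketch (the numeric constants follow later mainline linux/prctl.h and are an assumption for headers that predate this change):

```c
/* Userspace sketch: query the current endian mode via the new prctl option.
 * On kernels or architectures without GET_ENDIAN support this fails with EINVAL.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_ENDIAN			/* older headers may not define these yet */
#define PR_GET_ENDIAN	19
#define PR_SET_ENDIAN	20
#endif

int main(void)
{
	unsigned int endian;

	if (prctl(PR_GET_ENDIAN, &endian) == 0)
		printf("current endian mode: %u\n", endian);
	else
		perror("PR_GET_ENDIAN");	/* EINVAL where unsupported */

	return 0;
}
```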
