| author | Jeff Garzik <jgarzik@pobox.com> | 2005-07-30 18:14:15 -0400 |
|---|---|---|
| committer | Jeff Garzik <jgarzik@pobox.com> | 2005-07-30 18:14:15 -0400 |
| commit | a670fcb43f01a67ef56176afc76e5d43d128b25c (patch) | |
| tree | 09c9411c78a33ff980e9ea871bc7686e7589abbf /kernel | |
| parent | 327309e899662b482c58cf25f574513d38b5788c (diff) | |
| parent | b0825488a642cadcf39709961dde61440cb0731c (diff) | |
Merge /spare/repo/netdev-2.6 branch 'master'
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/capability.c | 20 |
| -rw-r--r-- | kernel/cpuset.c | 26 |
| -rw-r--r-- | kernel/crash_dump.c | 11 |
| -rw-r--r-- | kernel/itimer.c | 37 |
| -rw-r--r-- | kernel/panic.c | 9 |
| -rw-r--r-- | kernel/posix-timers.c | 17 |
| -rw-r--r-- | kernel/power/disk.c | 9 |
| -rw-r--r-- | kernel/power/poweroff.c | 4 |
| -rw-r--r-- | kernel/power/smp.c | 2 |
| -rw-r--r-- | kernel/sched.c | 8 |
| -rw-r--r-- | kernel/softirq.c | 4 |
| -rw-r--r-- | kernel/sys.c | 111 |
| -rw-r--r-- | kernel/sysctl.c | 63 |
| -rw-r--r-- | kernel/time.c | 2 |
14 files changed, 174 insertions, 149 deletions
diff --git a/kernel/capability.c b/kernel/capability.c
index 64db1ee820c2..8986a37a67ea 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
| @@ -31,8 +31,14 @@ static DEFINE_SPINLOCK(task_capability_lock); | |||
| 31 | * uninteresting and/or not to be changed. | 31 | * uninteresting and/or not to be changed. |
| 32 | */ | 32 | */ |
| 33 | 33 | ||
| 34 | /* | 34 | /** |
| 35 | * sys_capget - get the capabilities of a given process. | 35 | * sys_capget - get the capabilities of a given process. |
| 36 | * @header: pointer to struct that contains capability version and | ||
| 37 | * target pid data | ||
| 38 | * @dataptr: pointer to struct that contains the effective, permitted, | ||
| 39 | * and inheritable capabilities that are returned | ||
| 40 | * | ||
| 41 | * Returns 0 on success and < 0 on error. | ||
| 36 | */ | 42 | */ |
| 37 | asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) | 43 | asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) |
| 38 | { | 44 | { |
| @@ -141,8 +147,14 @@ static inline int cap_set_all(kernel_cap_t *effective, | |||
| 141 | return ret; | 147 | return ret; |
| 142 | } | 148 | } |
| 143 | 149 | ||
| 144 | /* | 150 | /** |
| 145 | * sys_capset - set capabilities for a given process, all processes, or all | 151 | * sys_capset - set capabilities for a process or a group of processes |
| 152 | * @header: pointer to struct that contains capability version and | ||
| 153 | * target pid data | ||
| 154 | * @data: pointer to struct that contains the effective, permitted, | ||
| 155 | * and inheritable capabilities | ||
| 156 | * | ||
| 157 | * Set capabilities for a given process, all processes, or all | ||
| 146 | * processes in a given process group. | 158 | * processes in a given process group. |
| 147 | * | 159 | * |
| 148 | * The restrictions on setting capabilities are specified as: | 160 | * The restrictions on setting capabilities are specified as: |
| @@ -152,6 +164,8 @@ static inline int cap_set_all(kernel_cap_t *effective, | |||
| 152 | * I: any raised capabilities must be a subset of the (old current) permitted | 164 | * I: any raised capabilities must be a subset of the (old current) permitted |
| 153 | * P: any raised capabilities must be a subset of the (old current) permitted | 165 | * P: any raised capabilities must be a subset of the (old current) permitted |
| 154 | * E: must be set to a subset of (new target) permitted | 166 | * E: must be set to a subset of (new target) permitted |
| 167 | * | ||
| 168 | * Returns 0 on success and < 0 on error. | ||
| 155 | */ | 169 | */ |
| 156 | asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | 170 | asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) |
| 157 | { | 171 | { |
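The kernel-doc added above spells out the capget()/capset() calling convention: a header carrying the capability version and target pid, plus a data struct holding the effective, permitted and inheritable sets, with 0 returned on success and < 0 on error. As a rough illustration only, a minimal userspace sketch of that convention might look like the following; the struct and constant names come from linux/capability.h, and probing pid 1 is just an example, not part of this patch.

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
	struct __user_cap_header_struct header = {
		.version = _LINUX_CAPABILITY_VERSION,	/* capability version negotiated with the kernel */
		.pid = 1,				/* target pid; 0 would mean the calling task */
	};
	struct __user_cap_data_struct data;		/* effective, permitted, inheritable sets */

	/* sys_capget: returns 0 on success and < 0 on error, as documented above */
	if (syscall(SYS_capget, &header, &data) < 0) {
		perror("capget");
		return 1;
	}
	printf("pid 1: eff=%#x perm=%#x inh=%#x\n",
	       data.effective, data.permitted, data.inheritable);
	return 0;
}
```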
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 984c0bf3807f..805fb9097318 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
| @@ -1440,10 +1440,10 @@ void __init cpuset_init_smp(void) | |||
| 1440 | 1440 | ||
| 1441 | /** | 1441 | /** |
| 1442 | * cpuset_fork - attach newly forked task to its parents cpuset. | 1442 | * cpuset_fork - attach newly forked task to its parents cpuset. |
| 1443 | * @p: pointer to task_struct of forking parent process. | 1443 | * @tsk: pointer to task_struct of forking parent process. |
| 1444 | * | 1444 | * |
| 1445 | * Description: By default, on fork, a task inherits its | 1445 | * Description: By default, on fork, a task inherits its |
| 1446 | * parents cpuset. The pointer to the shared cpuset is | 1446 | * parent's cpuset. The pointer to the shared cpuset is |
| 1447 | * automatically copied in fork.c by dup_task_struct(). | 1447 | * automatically copied in fork.c by dup_task_struct(). |
| 1448 | * This cpuset_fork() routine need only increment the usage | 1448 | * This cpuset_fork() routine need only increment the usage |
| 1449 | * counter in that cpuset. | 1449 | * counter in that cpuset. |
| @@ -1471,7 +1471,6 @@ void cpuset_fork(struct task_struct *tsk) | |||
| 1471 | * by the cpuset_sem semaphore. If you don't hold cpuset_sem, | 1471 | * by the cpuset_sem semaphore. If you don't hold cpuset_sem, |
| 1472 | * then a zero cpuset use count is a license to any other task to | 1472 | * then a zero cpuset use count is a license to any other task to |
| 1473 | * nuke the cpuset immediately. | 1473 | * nuke the cpuset immediately. |
| 1474 | * | ||
| 1475 | **/ | 1474 | **/ |
| 1476 | 1475 | ||
| 1477 | void cpuset_exit(struct task_struct *tsk) | 1476 | void cpuset_exit(struct task_struct *tsk) |
| @@ -1521,7 +1520,9 @@ void cpuset_init_current_mems_allowed(void) | |||
| 1521 | current->mems_allowed = NODE_MASK_ALL; | 1520 | current->mems_allowed = NODE_MASK_ALL; |
| 1522 | } | 1521 | } |
| 1523 | 1522 | ||
| 1524 | /* | 1523 | /** |
| 1524 | * cpuset_update_current_mems_allowed - update mems parameters to new values | ||
| 1525 | * | ||
| 1525 | * If the current tasks cpusets mems_allowed changed behind our backs, | 1526 | * If the current tasks cpusets mems_allowed changed behind our backs, |
| 1526 | * update current->mems_allowed and mems_generation to the new value. | 1527 | * update current->mems_allowed and mems_generation to the new value. |
| 1527 | * Do not call this routine if in_interrupt(). | 1528 | * Do not call this routine if in_interrupt(). |
| @@ -1540,13 +1541,20 @@ void cpuset_update_current_mems_allowed(void) | |||
| 1540 | } | 1541 | } |
| 1541 | } | 1542 | } |
| 1542 | 1543 | ||
| 1544 | /** | ||
| 1545 | * cpuset_restrict_to_mems_allowed - limit nodes to current mems_allowed | ||
| 1546 | * @nodes: pointer to a node bitmap that is and-ed with mems_allowed | ||
| 1547 | */ | ||
| 1543 | void cpuset_restrict_to_mems_allowed(unsigned long *nodes) | 1548 | void cpuset_restrict_to_mems_allowed(unsigned long *nodes) |
| 1544 | { | 1549 | { |
| 1545 | bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed), | 1550 | bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed), |
| 1546 | MAX_NUMNODES); | 1551 | MAX_NUMNODES); |
| 1547 | } | 1552 | } |
| 1548 | 1553 | ||
| 1549 | /* | 1554 | /** |
| 1555 | * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed | ||
| 1556 | * @zl: the zonelist to be checked | ||
| 1557 | * | ||
| 1550 | * Are any of the nodes on zonelist zl allowed in current->mems_allowed? | 1558 | * Are any of the nodes on zonelist zl allowed in current->mems_allowed? |
| 1551 | */ | 1559 | */ |
| 1552 | int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) | 1560 | int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) |
| @@ -1562,8 +1570,12 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) | |||
| 1562 | return 0; | 1570 | return 0; |
| 1563 | } | 1571 | } |
| 1564 | 1572 | ||
| 1565 | /* | 1573 | /** |
| 1566 | * Is 'current' valid, and is zone z allowed in current->mems_allowed? | 1574 | * cpuset_zone_allowed - is zone z allowed in current->mems_allowed |
| 1575 | * @z: zone in question | ||
| 1576 | * | ||
| 1577 | * Is zone z allowed in current->mems_allowed, or is | ||
| 1578 | * the CPU in interrupt context? (zone is always allowed in this case) | ||
| 1567 | */ | 1579 | */ |
| 1568 | int cpuset_zone_allowed(struct zone *z) | 1580 | int cpuset_zone_allowed(struct zone *z) |
| 1569 | { | 1581 | { |
diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c
index 459ba49e376a..334c37f5218a 100644
--- a/kernel/crash_dump.c
+++ b/kernel/crash_dump.c
| @@ -18,7 +18,16 @@ | |||
| 18 | /* Stores the physical address of elf header of crash image. */ | 18 | /* Stores the physical address of elf header of crash image. */ |
| 19 | unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; | 19 | unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; |
| 20 | 20 | ||
| 21 | /* | 21 | /** |
| 22 | * copy_oldmem_page - copy one page from "oldmem" | ||
| 23 | * @pfn: page frame number to be copied | ||
| 24 | * @buf: target memory address for the copy; this can be in kernel address | ||
| 25 | * space or user address space (see @userbuf) | ||
| 26 | * @csize: number of bytes to copy | ||
| 27 | * @offset: offset in bytes into the page (based on pfn) to begin the copy | ||
| 28 | * @userbuf: if set, @buf is in user address space, use copy_to_user(), | ||
| 29 | * otherwise @buf is in kernel address space, use memcpy(). | ||
| 30 | * | ||
| 22 | * Copy a page from "oldmem". For this page, there is no pte mapped | 31 | * Copy a page from "oldmem". For this page, there is no pte mapped |
| 23 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. | 32 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. |
| 24 | */ | 33 | */ |
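The new kernel-doc describes copy_oldmem_page() as a page-at-a-time interface: a pfn, an offset into that page, a byte count, and a flag saying whether the destination buffer is in user or kernel space. A hedged sketch of how a caller could walk a larger physical range with it, loosely modelled on the way the /proc/vmcore read path consumes this helper; read_oldmem_range() and its exact signature are assumptions made up for illustration.

```c
/* Kernel-side sketch only: split a physical range into page-sized chunks and
 * hand each chunk to copy_oldmem_page() with the pfn/offset/csize/userbuf
 * parameters documented above. */
static ssize_t read_oldmem_range(char *buf, size_t count, u64 paddr, int userbuf)
{
	ssize_t total = 0;

	while (count) {
		unsigned long pfn = paddr >> PAGE_SHIFT;	/* page containing paddr */
		unsigned long offset = paddr & (PAGE_SIZE - 1);	/* offset within that page */
		size_t csize = min_t(size_t, count, PAGE_SIZE - offset);
		ssize_t ret = copy_oldmem_page(pfn, buf, csize, offset, userbuf);

		if (ret < 0)
			return ret;
		buf += csize;
		paddr += csize;
		count -= csize;
		total += csize;
	}
	return total;
}
```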
diff --git a/kernel/itimer.c b/kernel/itimer.c
index a72cb0e5aa4b..7c1b25e25e47 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
| @@ -112,28 +112,11 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value) | |||
| 112 | return error; | 112 | return error; |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | /* | ||
| 116 | * Called with P->sighand->siglock held and P->signal->real_timer inactive. | ||
| 117 | * If interval is nonzero, arm the timer for interval ticks from now. | ||
| 118 | */ | ||
| 119 | static inline void it_real_arm(struct task_struct *p, unsigned long interval) | ||
| 120 | { | ||
| 121 | p->signal->it_real_value = interval; /* XXX unnecessary field?? */ | ||
| 122 | if (interval == 0) | ||
| 123 | return; | ||
| 124 | if (interval > (unsigned long) LONG_MAX) | ||
| 125 | interval = LONG_MAX; | ||
| 126 | /* the "+ 1" below makes sure that the timer doesn't go off before | ||
| 127 | * the interval requested. This could happen if | ||
| 128 | * time requested % (usecs per jiffy) is more than the usecs left | ||
| 129 | * in the current jiffy */ | ||
| 130 | p->signal->real_timer.expires = jiffies + interval + 1; | ||
| 131 | add_timer(&p->signal->real_timer); | ||
| 132 | } | ||
| 133 | 115 | ||
| 134 | void it_real_fn(unsigned long __data) | 116 | void it_real_fn(unsigned long __data) |
| 135 | { | 117 | { |
| 136 | struct task_struct * p = (struct task_struct *) __data; | 118 | struct task_struct * p = (struct task_struct *) __data; |
| 119 | unsigned long inc = p->signal->it_real_incr; | ||
| 137 | 120 | ||
| 138 | send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p); | 121 | send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p); |
| 139 | 122 | ||
| @@ -141,14 +124,23 @@ void it_real_fn(unsigned long __data) | |||
| 141 | * Now restart the timer if necessary. We don't need any locking | 124 | * Now restart the timer if necessary. We don't need any locking |
| 142 | * here because do_setitimer makes sure we have finished running | 125 | * here because do_setitimer makes sure we have finished running |
| 143 | * before it touches anything. | 126 | * before it touches anything. |
| 127 | * Note, we KNOW we are (or should be) at a jiffie edge here so | ||
| 128 | * we don't need the +1 stuff. Also, we want to use the prior | ||
| 129 | * expire value so as to not "slip" a jiffie if we are late. | ||
| 130 | * Deal with requesting a time prior to "now" here rather than | ||
| 131 | * in add_timer. | ||
| 144 | */ | 132 | */ |
| 145 | it_real_arm(p, p->signal->it_real_incr); | 133 | if (!inc) |
| 134 | return; | ||
| 135 | while (time_before_eq(p->signal->real_timer.expires, jiffies)) | ||
| 136 | p->signal->real_timer.expires += inc; | ||
| 137 | add_timer(&p->signal->real_timer); | ||
| 146 | } | 138 | } |
| 147 | 139 | ||
| 148 | int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) | 140 | int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) |
| 149 | { | 141 | { |
| 150 | struct task_struct *tsk = current; | 142 | struct task_struct *tsk = current; |
| 151 | unsigned long val, interval; | 143 | unsigned long val, interval, expires; |
| 152 | cputime_t cval, cinterval, nval, ninterval; | 144 | cputime_t cval, cinterval, nval, ninterval; |
| 153 | 145 | ||
| 154 | switch (which) { | 146 | switch (which) { |
| @@ -164,7 +156,10 @@ again: | |||
| 164 | } | 156 | } |
| 165 | tsk->signal->it_real_incr = | 157 | tsk->signal->it_real_incr = |
| 166 | timeval_to_jiffies(&value->it_interval); | 158 | timeval_to_jiffies(&value->it_interval); |
| 167 | it_real_arm(tsk, timeval_to_jiffies(&value->it_value)); | 159 | expires = timeval_to_jiffies(&value->it_value); |
| 160 | if (expires) | ||
| 161 | mod_timer(&tsk->signal->real_timer, | ||
| 162 | jiffies + 1 + expires); | ||
| 168 | spin_unlock_irq(&tsk->sighand->siglock); | 163 | spin_unlock_irq(&tsk->sighand->siglock); |
| 169 | if (ovalue) { | 164 | if (ovalue) { |
| 170 | jiffies_to_timeval(val, &ovalue->it_value); | 165 | jiffies_to_timeval(val, &ovalue->it_value); |
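The replacement comment explains the new re-arming strategy: since the handler runs at a jiffie edge, step the previous expiry forward by whole intervals instead of computing `jiffies + interval + 1`, so a late handler does not let the period drift. A stand-alone toy illustration of that catch-up loop (not kernel code; the kernel uses time_before_eq() so the comparison stays correct across jiffies wrap, the plain `<=` below is only for the example):

```c
#include <stdio.h>

int main(void)
{
	unsigned long expires = 1000;	/* the timer last fired at this tick */
	unsigned long inc = 10;		/* it_real_incr: interval in jiffies */
	unsigned long jiffies = 1025;	/* suppose the handler ran 25 ticks late */

	/* New scheme from the patch: advance the old expiry by whole intervals
	 * until it lies in the future, keeping the timer on its original grid. */
	while (expires <= jiffies)
		expires += inc;

	printf("re-armed for tick %lu\n", expires);	/* prints 1030: still on the 10-tick grid */
	return 0;
}
```

Under the old `jiffies + interval + 1` arming, each firing would have pushed the next expiry at least one extra tick out, slowly stretching the interval.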
diff --git a/kernel/panic.c b/kernel/panic.c
index 74ba5f3e46c7..aabc5f86fa3f 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
| @@ -111,12 +111,11 @@ NORET_TYPE void panic(const char * fmt, ...) | |||
| 111 | mdelay(1); | 111 | mdelay(1); |
| 112 | i++; | 112 | i++; |
| 113 | } | 113 | } |
| 114 | /* | 114 | /* This will not be a clean reboot, with everything |
| 115 | * Should we run the reboot notifier. For the moment Im | 115 | * shutting down. But if there is a chance of |
| 116 | * choosing not too. It might crash, be corrupt or do | 116 | * rebooting the system it will be rebooted. |
| 117 | * more harm than good for other reasons. | ||
| 118 | */ | 117 | */ |
| 119 | machine_restart(NULL); | 118 | emergency_restart(); |
| 120 | } | 119 | } |
| 121 | #ifdef __sparc__ | 120 | #ifdef __sparc__ |
| 122 | { | 121 | { |
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5b7b4736d82b..10b2ad749d14 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
| @@ -896,21 +896,10 @@ static int adjust_abs_time(struct k_clock *clock, struct timespec *tp, | |||
| 896 | jiffies_64_f = get_jiffies_64(); | 896 | jiffies_64_f = get_jiffies_64(); |
| 897 | } | 897 | } |
| 898 | /* | 898 | /* |
| 899 | * Take away now to get delta | 899 | * Take away now to get delta and normalize |
| 900 | */ | 900 | */ |
| 901 | oc.tv_sec -= now.tv_sec; | 901 | set_normalized_timespec(&oc, oc.tv_sec - now.tv_sec, |
| 902 | oc.tv_nsec -= now.tv_nsec; | 902 | oc.tv_nsec - now.tv_nsec); |
| 903 | /* | ||
| 904 | * Normalize... | ||
| 905 | */ | ||
| 906 | while ((oc.tv_nsec - NSEC_PER_SEC) >= 0) { | ||
| 907 | oc.tv_nsec -= NSEC_PER_SEC; | ||
| 908 | oc.tv_sec++; | ||
| 909 | } | ||
| 910 | while ((oc.tv_nsec) < 0) { | ||
| 911 | oc.tv_nsec += NSEC_PER_SEC; | ||
| 912 | oc.tv_sec--; | ||
| 913 | } | ||
| 914 | }else{ | 903 | }else{ |
| 915 | jiffies_64_f = get_jiffies_64(); | 904 | jiffies_64_f = get_jiffies_64(); |
| 916 | } | 905 | } |
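The open-coded second/nanosecond normalization is folded into a single set_normalized_timespec() call. For reference, the helper's behaviour is equivalent to the two loops this hunk deletes; a sketch of that behaviour (the real declaration lives in linux/time.h):

```c
/* Sketch of the semantics of set_normalized_timespec(): fold nsec into the
 * range [0, NSEC_PER_SEC) and adjust sec to match, then store the result. */
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
```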
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 3ec789c6b537..664eb0469b6e 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
| @@ -59,16 +59,13 @@ static void power_down(suspend_disk_method_t mode) | |||
| 59 | error = pm_ops->enter(PM_SUSPEND_DISK); | 59 | error = pm_ops->enter(PM_SUSPEND_DISK); |
| 60 | break; | 60 | break; |
| 61 | case PM_DISK_SHUTDOWN: | 61 | case PM_DISK_SHUTDOWN: |
| 62 | printk("Powering off system\n"); | 62 | kernel_power_off(); |
| 63 | device_shutdown(); | ||
| 64 | machine_power_off(); | ||
| 65 | break; | 63 | break; |
| 66 | case PM_DISK_REBOOT: | 64 | case PM_DISK_REBOOT: |
| 67 | device_shutdown(); | 65 | kernel_restart(NULL); |
| 68 | machine_restart(NULL); | ||
| 69 | break; | 66 | break; |
| 70 | } | 67 | } |
| 71 | machine_halt(); | 68 | kernel_halt(); |
| 72 | /* Valid image is on the disk, if we continue we risk serious data corruption | 69 | /* Valid image is on the disk, if we continue we risk serious data corruption |
| 73 | after resume. */ | 70 | after resume. */ |
| 74 | printk(KERN_CRIT "Please power me down manually\n"); | 71 | printk(KERN_CRIT "Please power me down manually\n"); |
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 715081b2d829..7a4144ba3afd 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
| 10 | #include <linux/pm.h> | 10 | #include <linux/pm.h> |
| 11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
| 12 | #include <linux/reboot.h> | ||
| 12 | 13 | ||
| 13 | /* | 14 | /* |
| 14 | * When the user hits Sys-Rq o to power down the machine this is the | 15 | * When the user hits Sys-Rq o to power down the machine this is the |
| @@ -17,8 +18,7 @@ | |||
| 17 | 18 | ||
| 18 | static void do_poweroff(void *dummy) | 19 | static void do_poweroff(void *dummy) |
| 19 | { | 20 | { |
| 20 | if (pm_power_off) | 21 | kernel_power_off(); |
| 21 | pm_power_off(); | ||
| 22 | } | 22 | } |
| 23 | 23 | ||
| 24 | static DECLARE_WORK(poweroff_work, do_poweroff, NULL); | 24 | static DECLARE_WORK(poweroff_work, do_poweroff, NULL); |
diff --git a/kernel/power/smp.c b/kernel/power/smp.c
index bbe23079c62c..911fc62b8225 100644
--- a/kernel/power/smp.c
+++ b/kernel/power/smp.c
| @@ -38,7 +38,7 @@ void disable_nonboot_cpus(void) | |||
| 38 | } | 38 | } |
| 39 | printk("Error taking cpu %d down: %d\n", cpu, error); | 39 | printk("Error taking cpu %d down: %d\n", cpu, error); |
| 40 | } | 40 | } |
| 41 | BUG_ON(smp_processor_id() != 0); | 41 | BUG_ON(raw_smp_processor_id() != 0); |
| 42 | if (error) | 42 | if (error) |
| 43 | panic("cpus not sleeping"); | 43 | panic("cpus not sleeping"); |
| 44 | } | 44 | } |
diff --git a/kernel/sched.c b/kernel/sched.c
index 4107db0dc091..a646e4f36c41 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -3486,7 +3486,7 @@ static void __setscheduler(struct task_struct *p, int policy, int prio) | |||
| 3486 | p->policy = policy; | 3486 | p->policy = policy; |
| 3487 | p->rt_priority = prio; | 3487 | p->rt_priority = prio; |
| 3488 | if (policy != SCHED_NORMAL) | 3488 | if (policy != SCHED_NORMAL) |
| 3489 | p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority; | 3489 | p->prio = MAX_RT_PRIO-1 - p->rt_priority; |
| 3490 | else | 3490 | else |
| 3491 | p->prio = p->static_prio; | 3491 | p->prio = p->static_prio; |
| 3492 | } | 3492 | } |
| @@ -3518,7 +3518,8 @@ recheck: | |||
| 3518 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0. | 3518 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0. |
| 3519 | */ | 3519 | */ |
| 3520 | if (param->sched_priority < 0 || | 3520 | if (param->sched_priority < 0 || |
| 3521 | param->sched_priority > MAX_USER_RT_PRIO-1) | 3521 | (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || |
| 3522 | (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) | ||
| 3522 | return -EINVAL; | 3523 | return -EINVAL; |
| 3523 | if ((policy == SCHED_NORMAL) != (param->sched_priority == 0)) | 3524 | if ((policy == SCHED_NORMAL) != (param->sched_priority == 0)) |
| 3524 | return -EINVAL; | 3525 | return -EINVAL; |
| @@ -3528,7 +3529,8 @@ recheck: | |||
| 3528 | */ | 3529 | */ |
| 3529 | if (!capable(CAP_SYS_NICE)) { | 3530 | if (!capable(CAP_SYS_NICE)) { |
| 3530 | /* can't change policy */ | 3531 | /* can't change policy */ |
| 3531 | if (policy != p->policy) | 3532 | if (policy != p->policy && |
| 3533 | !p->signal->rlim[RLIMIT_RTPRIO].rlim_cur) | ||
| 3532 | return -EPERM; | 3534 | return -EPERM; |
| 3533 | /* can't increase priority */ | 3535 | /* can't increase priority */ |
| 3534 | if (policy != SCHED_NORMAL && | 3536 | if (policy != SCHED_NORMAL && |
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b4ab6af1dea8..31007d6542cc 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
| @@ -86,7 +86,7 @@ restart: | |||
| 86 | /* Reset the pending bitmask before enabling irqs */ | 86 | /* Reset the pending bitmask before enabling irqs */ |
| 87 | local_softirq_pending() = 0; | 87 | local_softirq_pending() = 0; |
| 88 | 88 | ||
| 89 | local_irq_enable(); | 89 | //local_irq_enable(); |
| 90 | 90 | ||
| 91 | h = softirq_vec; | 91 | h = softirq_vec; |
| 92 | 92 | ||
| @@ -99,7 +99,7 @@ restart: | |||
| 99 | pending >>= 1; | 99 | pending >>= 1; |
| 100 | } while (pending); | 100 | } while (pending); |
| 101 | 101 | ||
| 102 | local_irq_disable(); | 102 | //local_irq_disable(); |
| 103 | 103 | ||
| 104 | pending = local_softirq_pending(); | 104 | pending = local_softirq_pending(); |
| 105 | if (pending && --max_restart) | 105 | if (pending && --max_restart) |
diff --git a/kernel/sys.c b/kernel/sys.c
index 9a24374c23bc..000e81ad2c1d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
| @@ -361,6 +361,66 @@ out_unlock: | |||
| 361 | return retval; | 361 | return retval; |
| 362 | } | 362 | } |
| 363 | 363 | ||
| 364 | void emergency_restart(void) | ||
| 365 | { | ||
| 366 | machine_emergency_restart(); | ||
| 367 | } | ||
| 368 | EXPORT_SYMBOL_GPL(emergency_restart); | ||
| 369 | |||
| 370 | void kernel_restart(char *cmd) | ||
| 371 | { | ||
| 372 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | ||
| 373 | system_state = SYSTEM_RESTART; | ||
| 374 | device_shutdown(); | ||
| 375 | if (!cmd) { | ||
| 376 | printk(KERN_EMERG "Restarting system.\n"); | ||
| 377 | } else { | ||
| 378 | printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); | ||
| 379 | } | ||
| 380 | printk(".\n"); | ||
| 381 | machine_restart(cmd); | ||
| 382 | } | ||
| 383 | EXPORT_SYMBOL_GPL(kernel_restart); | ||
| 384 | |||
| 385 | void kernel_kexec(void) | ||
| 386 | { | ||
| 387 | #ifdef CONFIG_KEXEC | ||
| 388 | struct kimage *image; | ||
| 389 | image = xchg(&kexec_image, 0); | ||
| 390 | if (!image) { | ||
| 391 | return; | ||
| 392 | } | ||
| 393 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); | ||
| 394 | system_state = SYSTEM_RESTART; | ||
| 395 | device_shutdown(); | ||
| 396 | printk(KERN_EMERG "Starting new kernel\n"); | ||
| 397 | machine_shutdown(); | ||
| 398 | machine_kexec(image); | ||
| 399 | #endif | ||
| 400 | } | ||
| 401 | EXPORT_SYMBOL_GPL(kernel_kexec); | ||
| 402 | |||
| 403 | void kernel_halt(void) | ||
| 404 | { | ||
| 405 | notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL); | ||
| 406 | system_state = SYSTEM_HALT; | ||
| 407 | device_suspend(PMSG_SUSPEND); | ||
| 408 | device_shutdown(); | ||
| 409 | printk(KERN_EMERG "System halted.\n"); | ||
| 410 | machine_halt(); | ||
| 411 | } | ||
| 412 | EXPORT_SYMBOL_GPL(kernel_halt); | ||
| 413 | |||
| 414 | void kernel_power_off(void) | ||
| 415 | { | ||
| 416 | notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL); | ||
| 417 | system_state = SYSTEM_POWER_OFF; | ||
| 418 | device_suspend(PMSG_SUSPEND); | ||
| 419 | device_shutdown(); | ||
| 420 | printk(KERN_EMERG "Power down.\n"); | ||
| 421 | machine_power_off(); | ||
| 422 | } | ||
| 423 | EXPORT_SYMBOL_GPL(kernel_power_off); | ||
| 364 | 424 | ||
| 365 | /* | 425 | /* |
| 366 | * Reboot system call: for obvious reasons only root may call it, | 426 | * Reboot system call: for obvious reasons only root may call it, |
| @@ -389,11 +449,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user | |||
| 389 | lock_kernel(); | 449 | lock_kernel(); |
| 390 | switch (cmd) { | 450 | switch (cmd) { |
| 391 | case LINUX_REBOOT_CMD_RESTART: | 451 | case LINUX_REBOOT_CMD_RESTART: |
| 392 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); | 452 | kernel_restart(NULL); |
| 393 | system_state = SYSTEM_RESTART; | ||
| 394 | device_shutdown(); | ||
| 395 | printk(KERN_EMERG "Restarting system.\n"); | ||
| 396 | machine_restart(NULL); | ||
| 397 | break; | 453 | break; |
| 398 | 454 | ||
| 399 | case LINUX_REBOOT_CMD_CAD_ON: | 455 | case LINUX_REBOOT_CMD_CAD_ON: |
| @@ -405,23 +461,13 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user | |||
| 405 | break; | 461 | break; |
| 406 | 462 | ||
| 407 | case LINUX_REBOOT_CMD_HALT: | 463 | case LINUX_REBOOT_CMD_HALT: |
| 408 | notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL); | 464 | kernel_halt(); |
| 409 | system_state = SYSTEM_HALT; | ||
| 410 | device_suspend(PMSG_SUSPEND); | ||
| 411 | device_shutdown(); | ||
| 412 | printk(KERN_EMERG "System halted.\n"); | ||
| 413 | machine_halt(); | ||
| 414 | unlock_kernel(); | 465 | unlock_kernel(); |
| 415 | do_exit(0); | 466 | do_exit(0); |
| 416 | break; | 467 | break; |
| 417 | 468 | ||
| 418 | case LINUX_REBOOT_CMD_POWER_OFF: | 469 | case LINUX_REBOOT_CMD_POWER_OFF: |
| 419 | notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL); | 470 | kernel_power_off(); |
| 420 | system_state = SYSTEM_POWER_OFF; | ||
| 421 | device_suspend(PMSG_SUSPEND); | ||
| 422 | device_shutdown(); | ||
| 423 | printk(KERN_EMERG "Power down.\n"); | ||
| 424 | machine_power_off(); | ||
| 425 | unlock_kernel(); | 471 | unlock_kernel(); |
| 426 | do_exit(0); | 472 | do_exit(0); |
| 427 | break; | 473 | break; |
| @@ -433,32 +479,14 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user | |||
| 433 | } | 479 | } |
| 434 | buffer[sizeof(buffer) - 1] = '\0'; | 480 | buffer[sizeof(buffer) - 1] = '\0'; |
| 435 | 481 | ||
| 436 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer); | 482 | kernel_restart(buffer); |
| 437 | system_state = SYSTEM_RESTART; | ||
| 438 | device_suspend(PMSG_FREEZE); | ||
| 439 | device_shutdown(); | ||
| 440 | printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer); | ||
| 441 | machine_restart(buffer); | ||
| 442 | break; | 483 | break; |
| 443 | 484 | ||
| 444 | #ifdef CONFIG_KEXEC | ||
| 445 | case LINUX_REBOOT_CMD_KEXEC: | 485 | case LINUX_REBOOT_CMD_KEXEC: |
| 446 | { | 486 | kernel_kexec(); |
| 447 | struct kimage *image; | 487 | unlock_kernel(); |
| 448 | image = xchg(&kexec_image, 0); | 488 | return -EINVAL; |
| 449 | if (!image) { | 489 | |
| 450 | unlock_kernel(); | ||
| 451 | return -EINVAL; | ||
| 452 | } | ||
| 453 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); | ||
| 454 | system_state = SYSTEM_RESTART; | ||
| 455 | device_shutdown(); | ||
| 456 | printk(KERN_EMERG "Starting new kernel\n"); | ||
| 457 | machine_shutdown(); | ||
| 458 | machine_kexec(image); | ||
| 459 | break; | ||
| 460 | } | ||
| 461 | #endif | ||
| 462 | #ifdef CONFIG_SOFTWARE_SUSPEND | 490 | #ifdef CONFIG_SOFTWARE_SUSPEND |
| 463 | case LINUX_REBOOT_CMD_SW_SUSPEND: | 491 | case LINUX_REBOOT_CMD_SW_SUSPEND: |
| 464 | { | 492 | { |
| @@ -478,8 +506,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user | |||
| 478 | 506 | ||
| 479 | static void deferred_cad(void *dummy) | 507 | static void deferred_cad(void *dummy) |
| 480 | { | 508 | { |
| 481 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); | 509 | kernel_restart(NULL); |
| 482 | machine_restart(NULL); | ||
| 483 | } | 510 | } |
| 484 | 511 | ||
| 485 | /* | 512 | /* |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b240e2cb86fc..3e0bbee549ea 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
| @@ -67,12 +67,6 @@ extern int printk_ratelimit_jiffies; | |||
| 67 | extern int printk_ratelimit_burst; | 67 | extern int printk_ratelimit_burst; |
| 68 | extern int pid_max_min, pid_max_max; | 68 | extern int pid_max_min, pid_max_max; |
| 69 | 69 | ||
| 70 | #ifdef CONFIG_INOTIFY | ||
| 71 | extern int inotify_max_user_devices; | ||
| 72 | extern int inotify_max_user_watches; | ||
| 73 | extern int inotify_max_queued_events; | ||
| 74 | #endif | ||
| 75 | |||
| 76 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) | 70 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) |
| 77 | int unknown_nmi_panic; | 71 | int unknown_nmi_panic; |
| 78 | extern int proc_unknown_nmi_panic(ctl_table *, int, struct file *, | 72 | extern int proc_unknown_nmi_panic(ctl_table *, int, struct file *, |
| @@ -120,6 +114,7 @@ extern int unaligned_enabled; | |||
| 120 | extern int sysctl_ieee_emulation_warnings; | 114 | extern int sysctl_ieee_emulation_warnings; |
| 121 | #endif | 115 | #endif |
| 122 | extern int sysctl_userprocess_debug; | 116 | extern int sysctl_userprocess_debug; |
| 117 | extern int spin_retry; | ||
| 123 | #endif | 118 | #endif |
| 124 | 119 | ||
| 125 | extern int sysctl_hz_timer; | 120 | extern int sysctl_hz_timer; |
| @@ -152,6 +147,9 @@ extern ctl_table random_table[]; | |||
| 152 | #ifdef CONFIG_UNIX98_PTYS | 147 | #ifdef CONFIG_UNIX98_PTYS |
| 153 | extern ctl_table pty_table[]; | 148 | extern ctl_table pty_table[]; |
| 154 | #endif | 149 | #endif |
| 150 | #ifdef CONFIG_INOTIFY | ||
| 151 | extern ctl_table inotify_table[]; | ||
| 152 | #endif | ||
| 155 | 153 | ||
| 156 | #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT | 154 | #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT |
| 157 | int sysctl_legacy_va_layout; | 155 | int sysctl_legacy_va_layout; |
| @@ -650,7 +648,16 @@ static ctl_table kern_table[] = { | |||
| 650 | .mode = 0644, | 648 | .mode = 0644, |
| 651 | .proc_handler = &proc_dointvec, | 649 | .proc_handler = &proc_dointvec, |
| 652 | }, | 650 | }, |
| 653 | 651 | #if defined(CONFIG_ARCH_S390) | |
| 652 | { | ||
| 653 | .ctl_name = KERN_SPIN_RETRY, | ||
| 654 | .procname = "spin_retry", | ||
| 655 | .data = &spin_retry, | ||
| 656 | .maxlen = sizeof (int), | ||
| 657 | .mode = 0644, | ||
| 658 | .proc_handler = &proc_dointvec, | ||
| 659 | }, | ||
| 660 | #endif | ||
| 654 | { .ctl_name = 0 } | 661 | { .ctl_name = 0 } |
| 655 | }; | 662 | }; |
| 656 | 663 | ||
| @@ -957,6 +964,14 @@ static ctl_table fs_table[] = { | |||
| 957 | .mode = 0644, | 964 | .mode = 0644, |
| 958 | .proc_handler = &proc_dointvec, | 965 | .proc_handler = &proc_dointvec, |
| 959 | }, | 966 | }, |
| 967 | #ifdef CONFIG_INOTIFY | ||
| 968 | { | ||
| 969 | .ctl_name = FS_INOTIFY, | ||
| 970 | .procname = "inotify", | ||
| 971 | .mode = 0555, | ||
| 972 | .child = inotify_table, | ||
| 973 | }, | ||
| 974 | #endif | ||
| 960 | #endif | 975 | #endif |
| 961 | { | 976 | { |
| 962 | .ctl_name = KERN_SETUID_DUMPABLE, | 977 | .ctl_name = KERN_SETUID_DUMPABLE, |
| @@ -966,40 +981,6 @@ static ctl_table fs_table[] = { | |||
| 966 | .mode = 0644, | 981 | .mode = 0644, |
| 967 | .proc_handler = &proc_dointvec, | 982 | .proc_handler = &proc_dointvec, |
| 968 | }, | 983 | }, |
| 969 | #ifdef CONFIG_INOTIFY | ||
| 970 | { | ||
| 971 | .ctl_name = INOTIFY_MAX_USER_DEVICES, | ||
| 972 | .procname = "max_user_devices", | ||
| 973 | .data = &inotify_max_user_devices, | ||
| 974 | .maxlen = sizeof(int), | ||
| 975 | .mode = 0644, | ||
| 976 | .proc_handler = &proc_dointvec_minmax, | ||
| 977 | .strategy = &sysctl_intvec, | ||
| 978 | .extra1 = &zero, | ||
| 979 | }, | ||
| 980 | |||
| 981 | { | ||
| 982 | .ctl_name = INOTIFY_MAX_USER_WATCHES, | ||
| 983 | .procname = "max_user_watches", | ||
| 984 | .data = &inotify_max_user_watches, | ||
| 985 | .maxlen = sizeof(int), | ||
| 986 | .mode = 0644, | ||
| 987 | .proc_handler = &proc_dointvec_minmax, | ||
| 988 | .strategy = &sysctl_intvec, | ||
| 989 | .extra1 = &zero, | ||
| 990 | }, | ||
| 991 | |||
| 992 | { | ||
| 993 | .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, | ||
| 994 | .procname = "max_queued_events", | ||
| 995 | .data = &inotify_max_queued_events, | ||
| 996 | .maxlen = sizeof(int), | ||
| 997 | .mode = 0644, | ||
| 998 | .proc_handler = &proc_dointvec_minmax, | ||
| 999 | .strategy = &sysctl_intvec, | ||
| 1000 | .extra1 = &zero | ||
| 1001 | }, | ||
| 1002 | #endif | ||
| 1003 | { .ctl_name = 0 } | 984 | { .ctl_name = 0 } |
| 1004 | }; | 985 | }; |
| 1005 | 986 | ||
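The per-limit inotify entries are removed from fs_table and replaced by a 0555 "inotify" directory whose .child table is owned by the inotify code itself. A hypothetical sketch of what such a child table could look like, reusing one of the limits removed above; the real inotify_table is defined in fs/inotify.c and is not shown in this diff, so the exact contents are an assumption for illustration.

```c
/* Hypothetical child table hooked in via the .child pointer added above.
 * INOTIFY_MAX_USER_WATCHES, inotify_max_user_watches and zero are taken from
 * the entries this patch removes from kernel/sysctl.c; the remaining entries
 * would follow the same pattern. */
ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{ .ctl_name = 0 }	/* table terminator */
};
```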
diff --git a/kernel/time.c b/kernel/time.c
index d4335c1c884c..dd5ae1162a8f 100644
--- a/kernel/time.c
+++ b/kernel/time.c
| @@ -128,7 +128,7 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us | |||
| 128 | * as real UNIX machines always do it. This avoids all headaches about | 128 | * as real UNIX machines always do it. This avoids all headaches about |
| 129 | * daylight saving times and warping kernel clocks. | 129 | * daylight saving times and warping kernel clocks. |
| 130 | */ | 130 | */ |
| 131 | inline static void warp_clock(void) | 131 | static inline void warp_clock(void) |
| 132 | { | 132 | { |
| 133 | write_seqlock_irq(&xtime_lock); | 133 | write_seqlock_irq(&xtime_lock); |
| 134 | wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60; | 134 | wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60; |
