Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c  | 20
-rw-r--r--  kernel/cpuset.c      | 26
-rw-r--r--  kernel/crash_dump.c  | 11
-rw-r--r--  kernel/itimer.c      | 37
-rw-r--r--  kernel/power/smp.c   |  2
-rw-r--r--  kernel/sys.c         |  1
-rw-r--r--  kernel/sysctl.c      | 12
-rw-r--r--  kernel/time.c        |  2
8 files changed, 75 insertions, 36 deletions
diff --git a/kernel/capability.c b/kernel/capability.c
index 64db1ee820c2..8986a37a67ea 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -31,8 +31,14 @@ static DEFINE_SPINLOCK(task_capability_lock);
  * uninteresting and/or not to be changed.
  */
 
-/*
+/**
  * sys_capget - get the capabilities of a given process.
+ * @header: pointer to struct that contains capability version and
+ *	target pid data
+ * @dataptr: pointer to struct that contains the effective, permitted,
+ *	and inheritable capabilities that are returned
+ *
+ * Returns 0 on success and < 0 on error.
  */
 asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 {
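The kernel-doc added above documents the sys_capget() user interface. As a rough illustration of how that interface is exercised from userspace, the sketch below fetches the caller's capability sets through the raw syscall; the struct and constant names come from <linux/capability.h>, the error handling is minimal, and this is only a usage sketch, not part of the patch.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

/* Hedged usage sketch for the sys_capget() interface documented above;
 * pid 0 in the header means "the calling process". */
int main(void)
{
	struct __user_cap_header_struct header;
	struct __user_cap_data_struct data;

	header.version = _LINUX_CAPABILITY_VERSION;	/* capability ABI version */
	header.pid = 0;					/* query ourselves */

	if (syscall(SYS_capget, &header, &data) != 0) {
		perror("capget");	/* the "< 0 on error" return surfaces as -1/errno */
		return 1;
	}
	printf("effective=%#x permitted=%#x inheritable=%#x\n",
	       data.effective, data.permitted, data.inheritable);
	return 0;
}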
@@ -141,8 +147,14 @@ static inline int cap_set_all(kernel_cap_t *effective,
 	return ret;
 }
 
-/*
- * sys_capset - set capabilities for a given process, all processes, or all
+/**
+ * sys_capset - set capabilities for a process or a group of processes
+ * @header: pointer to struct that contains capability version and
+ *	target pid data
+ * @data: pointer to struct that contains the effective, permitted,
+ *	and inheritable capabilities
+ *
+ * Set capabilities for a given process, all processes, or all
  * processes in a given process group.
  *
  * The restrictions on setting capabilities are specified as:
@@ -152,6 +164,8 @@ static inline int cap_set_all(kernel_cap_t *effective,
  * I: any raised capabilities must be a subset of the (old current) permitted
  * P: any raised capabilities must be a subset of the (old current) permitted
  * E: must be set to a subset of (new target) permitted
+ *
+ * Returns 0 on success and < 0 on error.
  */
 asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 {
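The I/P/E restrictions quoted in the comment reduce to three subset checks. A minimal userspace model of those checks, written over single 32-bit words rather than the kernel's kernel_cap_t (the names capset_allowed and caps_t are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t caps_t;	/* simplified: one 32-bit capability word */

/* Illustrative model of the sys_capset() rules above:
 *   I: bits raised in the inheritable set must come from the old permitted set,
 *   P: bits raised in the permitted set must come from the old permitted set
 *      (so permitted can only shrink),
 *   E: the effective set must be a subset of the new permitted set. */
static bool capset_allowed(caps_t old_inh, caps_t old_perm,
			   caps_t new_inh, caps_t new_perm, caps_t new_eff)
{
	if ((new_inh & ~old_inh) & ~old_perm)	/* I */
		return false;
	if (new_perm & ~old_perm)		/* P */
		return false;
	return (new_eff & ~new_perm) == 0;	/* E */
}

int main(void)
{
	/* dropping permitted bits and keeping effective inside them: allowed */
	printf("%d\n", capset_allowed(0x0, 0xf, 0x0, 0x7, 0x7));	/* prints 1 */
	/* trying to raise a permitted bit we never had: rejected */
	printf("%d\n", capset_allowed(0x0, 0xf, 0x0, 0x1f, 0x1));	/* prints 0 */
	return 0;
}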
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 984c0bf3807f..805fb9097318 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1440,10 +1440,10 @@ void __init cpuset_init_smp(void)
 
 /**
  * cpuset_fork - attach newly forked task to its parents cpuset.
- * @p: pointer to task_struct of forking parent process.
+ * @tsk: pointer to task_struct of forking parent process.
  *
  * Description: By default, on fork, a task inherits its
- * parents cpuset. The pointer to the shared cpuset is
+ * parent's cpuset. The pointer to the shared cpuset is
  * automatically copied in fork.c by dup_task_struct().
  * This cpuset_fork() routine need only increment the usage
  * counter in that cpuset.
@@ -1471,7 +1471,6 @@ void cpuset_fork(struct task_struct *tsk)
  * by the cpuset_sem semaphore. If you don't hold cpuset_sem,
  * then a zero cpuset use count is a license to any other task to
  * nuke the cpuset immediately.
- *
  **/
 
 void cpuset_exit(struct task_struct *tsk)
@@ -1521,7 +1520,9 @@ void cpuset_init_current_mems_allowed(void)
 	current->mems_allowed = NODE_MASK_ALL;
 }
 
-/*
+/**
+ * cpuset_update_current_mems_allowed - update mems parameters to new values
+ *
  * If the current tasks cpusets mems_allowed changed behind our backs,
  * update current->mems_allowed and mems_generation to the new value.
  * Do not call this routine if in_interrupt().
@@ -1540,13 +1541,20 @@ void cpuset_update_current_mems_allowed(void)
 	}
 }
 
+/**
+ * cpuset_restrict_to_mems_allowed - limit nodes to current mems_allowed
+ * @nodes: pointer to a node bitmap that is and-ed with mems_allowed
+ */
 void cpuset_restrict_to_mems_allowed(unsigned long *nodes)
 {
 	bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed),
 			MAX_NUMNODES);
 }
 
-/*
+/**
+ * cpuset_zonelist_valid_mems_allowed - check zonelist vs. curremt mems_allowed
+ * @zl: the zonelist to be checked
+ *
  * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
  */
 int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
@@ -1562,8 +1570,12 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
 	return 0;
 }
 
-/*
- * Is 'current' valid, and is zone z allowed in current->mems_allowed?
+/**
+ * cpuset_zone_allowed - is zone z allowed in current->mems_allowed
+ * @z: zone in question
+ *
+ * Is zone z allowed in current->mems_allowed, or is
+ * the CPU in interrupt context? (zone is always allowed in this case)
  */
 int cpuset_zone_allowed(struct zone *z)
 {
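cpuset_restrict_to_mems_allowed(), documented in the hunk above, is just a bitmap intersection: whatever node mask the caller proposes is and-ed with the task's mems_allowed. A tiny self-contained model of that narrowing, with the node masks reduced to plain words (this is an illustration, not the kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long requested    = 0x0f;	/* caller asks for nodes 0-3 */
	unsigned long mems_allowed = 0x0a;	/* cpuset permits nodes 1 and 3 */

	/* models bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed), ...) */
	requested &= mems_allowed;

	printf("usable node mask: 0x%lx\n", requested);	/* prints 0xa */
	return 0;
}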
diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c
index 459ba49e376a..334c37f5218a 100644
--- a/kernel/crash_dump.c
+++ b/kernel/crash_dump.c
@@ -18,7 +18,16 @@
 /* Stores the physical address of elf header of crash image. */
 unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
 
-/*
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ *	space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ *	otherwise @buf is in kernel address space, use memcpy().
+ *
  * Copy a page from "oldmem". For this page, there is no pte mapped
  * in the current kernel. We stitch up a pte, similar to kmap_atomic.
  */
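The new kernel-doc spells out the @userbuf convention: one copy routine serves both kernel and user destinations and picks copy_to_user() or memcpy() accordingly. The stripped-down userspace model below only illustrates that parameter contract; the pte stitching is absent, copy_to_user() is replaced by a stub, and the function name copy_oldmem_page_sketch is made up.

#include <stddef.h>
#include <string.h>

/* stand-in for copy_to_user(); in the kernel a failure here yields -EFAULT */
static int copy_to_user_stub(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

/* Sketch of the contract documented above: @page is the already-mapped
 * old-memory page (the real code maps it from @pfn first), @offset/@csize
 * select the byte range, @userbuf picks the copy primitive. */
static long copy_oldmem_page_sketch(const char *page, char *buf,
				    size_t csize, unsigned long offset,
				    int userbuf)
{
	if (!csize)
		return 0;
	if (userbuf) {
		if (copy_to_user_stub(buf, page + offset, csize))
			return -1;	/* -EFAULT in the kernel */
	} else {
		memcpy(buf, page + offset, csize);
	}
	return (long)csize;
}

int main(void)
{
	char page[64] = "old memory contents";
	char out[32];

	/* copies the 11 bytes "memory cont" starting at offset 4 */
	return copy_oldmem_page_sketch(page, out, 11, 4, 0) == 11 ? 0 : 1;
}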
diff --git a/kernel/itimer.c b/kernel/itimer.c
index a72cb0e5aa4b..7c1b25e25e47 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -112,28 +112,11 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
 	return error;
 }
 
-/*
- * Called with P->sighand->siglock held and P->signal->real_timer inactive.
- * If interval is nonzero, arm the timer for interval ticks from now.
- */
-static inline void it_real_arm(struct task_struct *p, unsigned long interval)
-{
-	p->signal->it_real_value = interval; /* XXX unnecessary field?? */
-	if (interval == 0)
-		return;
-	if (interval > (unsigned long) LONG_MAX)
-		interval = LONG_MAX;
-	/* the "+ 1" below makes sure that the timer doesn't go off before
-	 * the interval requested. This could happen if
-	 * time requested % (usecs per jiffy) is more than the usecs left
-	 * in the current jiffy */
-	p->signal->real_timer.expires = jiffies + interval + 1;
-	add_timer(&p->signal->real_timer);
-}
 
 void it_real_fn(unsigned long __data)
 {
 	struct task_struct * p = (struct task_struct *) __data;
+	unsigned long inc = p->signal->it_real_incr;
 
 	send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p);
 
@@ -141,14 +124,23 @@ void it_real_fn(unsigned long __data)
 	 * Now restart the timer if necessary. We don't need any locking
 	 * here because do_setitimer makes sure we have finished running
 	 * before it touches anything.
+	 * Note, we KNOW we are (or should be) at a jiffie edge here so
+	 * we don't need the +1 stuff. Also, we want to use the prior
+	 * expire value so as to not "slip" a jiffie if we are late.
+	 * Deal with requesting a time prior to "now" here rather than
+	 * in add_timer.
 	 */
-	it_real_arm(p, p->signal->it_real_incr);
+	if (!inc)
+		return;
+	while (time_before_eq(p->signal->real_timer.expires, jiffies))
+		p->signal->real_timer.expires += inc;
+	add_timer(&p->signal->real_timer);
 }
 
 int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 {
 	struct task_struct *tsk = current;
-	unsigned long val, interval;
+	unsigned long val, interval, expires;
 	cputime_t cval, cinterval, nval, ninterval;
 
 	switch (which) {
@@ -164,7 +156,10 @@ again:
 		}
 		tsk->signal->it_real_incr =
 			timeval_to_jiffies(&value->it_interval);
-		it_real_arm(tsk, timeval_to_jiffies(&value->it_value));
+		expires = timeval_to_jiffies(&value->it_value);
+		if (expires)
+			mod_timer(&tsk->signal->real_timer,
+				  jiffies + 1 + expires);
 		spin_unlock_irq(&tsk->sighand->siglock);
 		if (ovalue) {
 			jiffies_to_timeval(val, &ovalue->it_value);
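The new re-arm code in it_real_fn() replaces it_real_arm()'s "jiffies + interval + 1" with a catch-up loop: the old expiry is advanced by whole intervals until it lands in the future, so a late-running callback does not slip the schedule by a jiffy. The self-contained model below walks through that loop with made-up numbers; the wraparound handling that time_before_eq() provides is omitted.

#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 1075;	/* the callback ran 25 ticks late */
	unsigned long expires = 1050;	/* previous expiry of real_timer */
	unsigned long inc     = 10;	/* it_real_incr: interval in jiffies */

	if (!inc)
		return 0;		/* one-shot timer: nothing to re-arm */

	/* advance by whole intervals until the expiry is in the future */
	while (expires <= jiffies)
		expires += inc;

	printf("re-armed for jiffy %lu\n", expires);	/* prints 1080 */
	return 0;
}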
diff --git a/kernel/power/smp.c b/kernel/power/smp.c
index bbe23079c62c..911fc62b8225 100644
--- a/kernel/power/smp.c
+++ b/kernel/power/smp.c
@@ -38,7 +38,7 @@ void disable_nonboot_cpus(void)
 		}
 		printk("Error taking cpu %d down: %d\n", cpu, error);
 	}
-	BUG_ON(smp_processor_id() != 0);
+	BUG_ON(raw_smp_processor_id() != 0);
 	if (error)
 		panic("cpus not sleeping");
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index a74039036fb4..8f255259ef9e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -371,7 +371,6 @@ void kernel_restart(char *cmd)
 {
 	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 	system_state = SYSTEM_RESTART;
-	device_suspend(PMSG_FREEZE);
 	device_shutdown();
 	if (!cmd) {
 		printk(KERN_EMERG "Restarting system.\n");
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e60b9c36f1f0..3e0bbee549ea 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -114,6 +114,7 @@ extern int unaligned_enabled;
 extern int sysctl_ieee_emulation_warnings;
 #endif
 extern int sysctl_userprocess_debug;
+extern int spin_retry;
 #endif
 
 extern int sysctl_hz_timer;
@@ -647,7 +648,16 @@ static ctl_table kern_table[] = {
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
-
+#if defined(CONFIG_ARCH_S390)
+	{
+		.ctl_name = KERN_SPIN_RETRY,
+		.procname = "spin_retry",
+		.data = &spin_retry,
+		.maxlen = sizeof (int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+#endif
 	{ .ctl_name = 0 }
 };
 
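Once the ctl_table entry above is registered on an s390 build, proc_dointvec should expose the value under /proc/sys/kernel as "spin_retry". A minimal, hedged reader for that file (the path assumes a kernel that actually carries this patch and config option):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/spin_retry", "r");
	int retries;

	if (!f) {
		perror("/proc/sys/kernel/spin_retry");
		return 1;
	}
	if (fscanf(f, "%d", &retries) == 1)
		printf("spin_retry = %d\n", retries);
	fclose(f);
	return 0;
}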
diff --git a/kernel/time.c b/kernel/time.c
index d4335c1c884c..dd5ae1162a8f 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -128,7 +128,7 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us
  * as real UNIX machines always do it. This avoids all headaches about
  * daylight saving times and warping kernel clocks.
  */
-inline static void warp_clock(void)
+static inline void warp_clock(void)
 {
 	write_seqlock_irq(&xtime_lock);
 	wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;