Diffstat (limited to 'kernel')
 kernel/acct.c        |  2
 kernel/futex.c       | 51
 kernel/hrtimer.c     |  2
 kernel/kexec.c       |  1
 kernel/kmod.c        | 13
 kernel/lockdep.c     | 12
 kernel/module.c      | 29
 kernel/params.c      | 10
 kernel/power/main.c  |  3
 kernel/power/pm.c    |  4
 kernel/printk.c      |  2
 kernel/ptrace.c      |  6
 kernel/rcupdate.c    |  2
 kernel/sched.c       | 10
 kernel/sched_debug.c |  8
 kernel/timer.c       |  4
 kernel/workqueue.c   |  5
 17 files changed, 112 insertions(+), 52 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index cf19547cc9e4..521dfa53cb99 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -482,7 +482,7 @@ static void do_acct_process(struct file *file)
 #endif
 #if ACCT_VERSION==3
 	ac.ac_pid = current->tgid;
-	ac.ac_ppid = current->parent->tgid;
+	ac.ac_ppid = current->real_parent->tgid;
 #endif
 
 	spin_lock_irq(&current->sighand->siglock);
diff --git a/kernel/futex.c b/kernel/futex.c
index 172a1aeeafdb..db9824de8bf0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1097,15 +1097,15 @@ static void unqueue_me_pi(struct futex_q *q)
 }
 
 /*
- * Fixup the pi_state owner with current.
+ * Fixup the pi_state owner with the new owner.
  *
  * Must be called with hash bucket lock held and mm->sem held for non
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *curr)
+				struct task_struct *newowner)
 {
-	u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS;
+	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
 	u32 uval, curval, newval;
 	int ret;
@@ -1119,12 +1119,12 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	} else
 		newtid |= FUTEX_OWNER_DIED;
 
-	pi_state->owner = curr;
+	pi_state->owner = newowner;
 
-	spin_lock_irq(&curr->pi_lock);
+	spin_lock_irq(&newowner->pi_lock);
 	WARN_ON(!list_empty(&pi_state->list));
-	list_add(&pi_state->list, &curr->pi_state_list);
-	spin_unlock_irq(&curr->pi_lock);
+	list_add(&pi_state->list, &newowner->pi_state_list);
+	spin_unlock_irq(&newowner->pi_lock);
 
 	/*
 	 * We own it, so we have to replace the pending owner
@@ -1508,9 +1508,40 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * when we were on the way back before we locked the
 		 * hash bucket.
 		 */
-		if (q.pi_state->owner == curr &&
-		    rt_mutex_trylock(&q.pi_state->pi_mutex)) {
-			ret = 0;
+		if (q.pi_state->owner == curr) {
+			/*
+			 * Try to get the rt_mutex now. This might
+			 * fail as some other task acquired the
+			 * rt_mutex after we removed ourself from the
+			 * rt_mutex waiters list.
+			 */
+			if (rt_mutex_trylock(&q.pi_state->pi_mutex))
+				ret = 0;
+			else {
+				/*
+				 * pi_state is incorrect, some other
+				 * task did a lock steal and we
+				 * returned due to timeout or signal
+				 * without taking the rt_mutex. Too
+				 * late. We can access the
+				 * rt_mutex_owner without locking, as
+				 * the other task is now blocked on
+				 * the hash bucket lock. Fix the state
+				 * up.
+				 */
+				struct task_struct *owner;
+				int res;
+
+				owner = rt_mutex_owner(&q.pi_state->pi_mutex);
+				res = fixup_pi_state_owner(uaddr, &q, owner);
+
+				WARN_ON(rt_mutex_owner(&q.pi_state->pi_mutex) !=
+							owner);
+
+				/* propagate -EFAULT, if the fixup failed */
+				if (res)
+					ret = res;
+			}
 		} else {
 			/*
 			 * Paranoia check. If we did not take the lock
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e65dd0b47cdc..f994bb8065e6 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1378,7 +1378,7 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
 /*
  * Functions related to boot-time initialization:
  */
-static void __devinit init_hrtimers_cpu(int cpu)
+static void __cpuinit init_hrtimers_cpu(int cpu)
 {
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index aa74a1ef2da8..9a26eec9eb04 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1404,6 +1404,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(list_head, next);
 	VMCOREINFO_OFFSET(list_head, prev);
 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
 
 	arch_crash_save_vmcoreinfo();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index c6a4f8aebeba..bb7df2a28bd7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -451,13 +451,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
 			 enum umh_wait wait)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
-	int retval;
+	int retval = 0;
 
 	helper_lock();
-	if (sub_info->path[0] == '\0') {
-		retval = 0;
+	if (sub_info->path[0] == '\0')
 		goto out;
-	}
 
 	if (!khelper_wq || usermodehelper_disabled) {
 		retval = -EBUSY;
@@ -468,13 +466,14 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
 	sub_info->wait = wait;
 
 	queue_work(khelper_wq, &sub_info->work);
 	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
-		return 0;
+		goto unlock;
 	wait_for_completion(&done);
 	retval = sub_info->retval;
 
-  out:
+out:
 	call_usermodehelper_freeinfo(sub_info);
+unlock:
 	helper_unlock();
 	return retval;
 }
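
The kmod.c hunks above rework call_usermodehelper_exec() so that every exit path goes through helper_unlock(), while only the paths that still own sub_info reach call_usermodehelper_freeinfo(): in the UMH_NO_WAIT case the spawned task frees sub_info itself, so the submitter jumps past the free but still drops the helper lock. Below is a minimal userspace sketch of that two-label cleanup idiom; the names (helpers_get/helpers_put, worker, submit) are hypothetical stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for helper_lock()/helper_unlock(). */
static void helpers_get(void) { puts("helpers: locked"); }
static void helpers_put(void) { puts("helpers: unlocked"); }

/* Pretend worker: in the no-wait case it takes ownership and frees req. */
static void worker(char *req)
{
	printf("worker ran and freed \"%s\"\n", req);
	free(req);
}

/*
 * Two-label cleanup: "out" frees the caller-supplied request and falls
 * through to the unlock; "unlock" alone is used once ownership has been
 * handed to the worker.
 */
static int submit(char *req, int no_wait)
{
	int retval = 0;

	helpers_get();
	if (req[0] == '\0')
		goto out;

	if (no_wait) {
		worker(req);		/* worker owns req from here on */
		goto unlock;		/* so it must not be freed below */
	}

	printf("ran \"%s\" and waited for it\n", req);
out:
	free(req);
unlock:
	helpers_put();
	return retval;
}

int main(void)
{
	submit(strdup("/sbin/hotplug"), 0);
	submit(strdup("/sbin/hotplug"), 1);
	return 0;
}

The second label is purely about control flow: a single function exit, with the free skipped exactly on the path where ownership has already moved.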
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 723bd9f92556..4335f12a27c6 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2943,9 +2943,10 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	struct list_head *head;
 	unsigned long flags;
 	int i;
+	int locked;
 
 	raw_local_irq_save(flags);
-	graph_lock();
+	locked = graph_lock();
 
 	/*
 	 * Unhash all classes that were created by this module:
@@ -2959,7 +2960,8 @@ void lockdep_free_key_range(void *start, unsigned long size)
 				zap_class(class);
 		}
 
-	graph_unlock();
+	if (locked)
+		graph_unlock();
 	raw_local_irq_restore(flags);
 }
 
@@ -2969,6 +2971,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	struct list_head *head;
 	unsigned long flags;
 	int i, j;
+	int locked;
 
 	raw_local_irq_save(flags);
 
@@ -2987,7 +2990,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	 * Debug check: in the end all mapped classes should
 	 * be gone.
 	 */
-	graph_lock();
+	locked = graph_lock();
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
 		if (list_empty(head))
@@ -3000,7 +3003,8 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 			}
 		}
 	}
-	graph_unlock();
+	if (locked)
+		graph_unlock();
 
 out_restore:
 	raw_local_irq_restore(flags);
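
graph_lock() returns 0 when the lock-debugging core has already turned itself off (debug_locks cleared), and in that case it has already dropped the internal lock, so calling graph_unlock() afterwards would release a lock that is not held. The lockdep.c hunks above therefore record the return value and unlock conditionally. The shape of the fix as a small self-contained C sketch, with pthread_mutex_trylock() standing in for the maybe-failing acquire (an illustration of the control flow only, not the lockdep internals):

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t graph = PTHREAD_MUTEX_INITIALIZER;

static void scan_classes(void)
{
	/* Remember whether the lock was actually acquired... */
	int locked = (pthread_mutex_trylock(&graph) == 0);

	printf("scanning (locked=%d)\n", locked);

	/* ...and release it only if it was. */
	if (locked)
		pthread_mutex_unlock(&graph);
}

int main(void)
{
	scan_classes();
	return 0;
}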
diff --git a/kernel/module.c b/kernel/module.c
index 91fe6958b6e1..c2e3e2e98801 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2214,29 +2214,34 @@ static const char *get_ksymbol(struct module *mod,
 /* For kallsyms to ask for address resolution. NULL means not found.
    We don't lock, as this is used for oops resolution and races are a
    lesser concern. */
+/* FIXME: Risky: returns a pointer into a module w/o lock */
 const char *module_address_lookup(unsigned long addr,
 				  unsigned long *size,
 				  unsigned long *offset,
 				  char **modname)
 {
 	struct module *mod;
+	const char *ret = NULL;
 
+	preempt_disable();
 	list_for_each_entry(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size)
 		    || within(addr, mod->module_core, mod->core_size)) {
 			if (modname)
 				*modname = mod->name;
-			return get_ksymbol(mod, addr, size, offset);
+			ret = get_ksymbol(mod, addr, size, offset);
+			break;
 		}
 	}
-	return NULL;
+	preempt_enable();
+	return ret;
 }
 
 int lookup_module_symbol_name(unsigned long addr, char *symname)
 {
 	struct module *mod;
 
-	mutex_lock(&module_mutex);
+	preempt_disable();
 	list_for_each_entry(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size) ||
 		    within(addr, mod->module_core, mod->core_size)) {
@@ -2246,12 +2251,12 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
 			if (!sym)
 				goto out;
 			strlcpy(symname, sym, KSYM_NAME_LEN);
-			mutex_unlock(&module_mutex);
+			preempt_enable();
 			return 0;
 		}
 	}
 out:
-	mutex_unlock(&module_mutex);
+	preempt_enable();
 	return -ERANGE;
 }
 
@@ -2260,7 +2265,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
 {
 	struct module *mod;
 
-	mutex_lock(&module_mutex);
+	preempt_disable();
 	list_for_each_entry(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size) ||
 		    within(addr, mod->module_core, mod->core_size)) {
@@ -2273,12 +2278,12 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
 			strlcpy(modname, mod->name, MODULE_NAME_LEN);
 			if (name)
 				strlcpy(name, sym, KSYM_NAME_LEN);
-			mutex_unlock(&module_mutex);
+			preempt_enable();
 			return 0;
 		}
 	}
 out:
-	mutex_unlock(&module_mutex);
+	preempt_enable();
 	return -ERANGE;
 }
 
@@ -2287,7 +2292,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 {
 	struct module *mod;
 
-	mutex_lock(&module_mutex);
+	preempt_disable();
 	list_for_each_entry(mod, &modules, list) {
 		if (symnum < mod->num_symtab) {
 			*value = mod->symtab[symnum].st_value;
@@ -2296,12 +2301,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 				KSYM_NAME_LEN);
 			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
 			*exported = is_exported(name, mod);
-			mutex_unlock(&module_mutex);
+			preempt_enable();
 			return 0;
 		}
 		symnum -= mod->num_symtab;
 	}
-	mutex_unlock(&module_mutex);
+	preempt_enable();
 	return -ERANGE;
 }
 
@@ -2324,6 +2329,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
 	unsigned long ret = 0;
 
 	/* Don't lock: we're in enough trouble already. */
+	preempt_disable();
 	if ((colon = strchr(name, ':')) != NULL) {
 		*colon = '\0';
 		if ((mod = find_module(name)) != NULL)
@@ -2334,6 +2340,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
 			if ((ret = mod_find_symname(mod, name)) != 0)
 				break;
 	}
+	preempt_enable();
 	return ret;
 }
 #endif /* CONFIG_KALLSYMS */
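
Two things happen in the module.c hunks: the kallsyms helpers switch from mutex_lock(&module_mutex) to preempt_disable()/preempt_enable(), presumably because these lookups can run from oops/backtrace paths where sleeping on a mutex is not safe (the file comment itself says they are used for oops resolution), and module_address_lookup() trades its early return for "ret = ...; break;" so the single preempt_enable() at the bottom is reached on every path. That second part is a generic single-exit refactoring; here is a small userspace sketch of it, with hypothetical data and a pthread mutex standing in for the preemption guard:

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *names[] = { "ext3", "e1000", "snd" };

/* Single exit: every path leaves through the one unlock at the bottom. */
static const char *lookup(const char *want)
{
	const char *ret = NULL;
	size_t i;

	pthread_mutex_lock(&list_lock);
	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (strcmp(names[i], want) == 0) {
			ret = names[i];	/* record the hit... */
			break;		/* ...instead of returning here */
		}
	}
	pthread_mutex_unlock(&list_lock);
	return ret;
}

int main(void)
{
	printf("%s\n", lookup("e1000") ? "found" : "not found");
	return 0;
}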
diff --git a/kernel/params.c b/kernel/params.c
index 2a4c51487e72..7686417ee00e 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -697,8 +697,18 @@ static struct kset_uevent_ops module_uevent_ops = {
 decl_subsys(module, &module_ktype, &module_uevent_ops);
 int module_sysfs_initialized;
 
+static void module_release(struct kobject *kobj)
+{
+	/*
+	 * Stupid empty release function to allow the memory for the kobject to
+	 * be properly cleaned up. This will not need to be present for 2.6.25
+	 * with the upcoming kobject core rework.
+	 */
+}
+
 static struct kobj_type module_ktype = {
 	.sysfs_ops = &module_sysfs_ops,
+	.release = module_release,
 };
 
 /*
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3cdf95b1dc92..f71c9504a5c5 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -28,6 +28,9 @@ BLOCKING_NOTIFIER_HEAD(pm_chain_head);
 
 DEFINE_MUTEX(pm_mutex);
 
+unsigned int pm_flags;
+EXPORT_SYMBOL(pm_flags);
+
 #ifdef CONFIG_SUSPEND
 
 /* This is just an arbitrary number */
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index c50d15266c10..60c73fa670d5 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -27,8 +27,6 @@
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
 
-int pm_active;
-
 /*
  * Locking notes:
  * pm_devs_lock can be a semaphore providing pm ops are not called
@@ -204,6 +202,4 @@ int pm_send_all(pm_request_t rqst, void *data)
 
 EXPORT_SYMBOL(pm_register);
 EXPORT_SYMBOL(pm_send_all);
-EXPORT_SYMBOL(pm_active);
-
 
diff --git a/kernel/printk.c b/kernel/printk.c
index a30fe33de395..89011bf8c106 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -817,7 +817,7 @@ __setup("console=", console_setup);
  * commonly to provide a default console (ie from PROM variables) when
  * the user has not supplied one.
  */
-int __init add_preferred_console(char *name, int idx, char *options)
+int add_preferred_console(char *name, int idx, char *options)
 {
 	struct console_cmdline *c;
 	int i;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 7c76f2ffaeaa..c25db863081d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -120,7 +120,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	return ret;
 }
 
-static int may_attach(struct task_struct *task)
+int __ptrace_may_attach(struct task_struct *task)
 {
 	/* May we inspect the given task?
 	 * This check is used both for attaching with ptrace
@@ -154,7 +154,7 @@ int ptrace_may_attach(struct task_struct *task)
 {
 	int err;
 	task_lock(task);
-	err = may_attach(task);
+	err = __ptrace_may_attach(task);
 	task_unlock(task);
 	return !err;
 }
@@ -196,7 +196,7 @@ repeat:
 	/* the same process cannot be attached many times */
 	if (task->ptrace & PT_PTRACED)
 		goto bad;
-	retval = may_attach(task);
+	retval = __ptrace_may_attach(task);
 	if (retval)
 		goto bad;
 
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a66d4d1615f7..f2c1a04e9b18 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -549,7 +549,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 	rdp->blimit = blimit;
 }
 
-static void __devinit rcu_online_cpu(int cpu)
+static void __cpuinit rcu_online_cpu(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
diff --git a/kernel/sched.c b/kernel/sched.c
index 3df84ea6aba9..e76b11ca6df3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4918,7 +4918,7 @@ static void show_task(struct task_struct *p)
 	}
 #endif
 	printk(KERN_CONT "%5lu %5d %6d\n", free,
-		task_pid_nr(p), task_pid_nr(p->parent));
+		task_pid_nr(p), task_pid_nr(p->real_parent));
 
 	if (state != TASK_RUNNING)
 		show_stack(p, NULL);
@@ -7153,6 +7153,14 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
 
+	/*
+	 * A weight of 0 or 1 can cause arithmetics problems.
+	 * (The default weight is 1024 - so there's no practical
+	 * limitation from this.)
+	 */
+	if (shares < 2)
+		shares = 2;
+
 	spin_lock(&tg->lock);
 	if (tg->shares == shares)
 		goto done;
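
The new clamp in sched_group_set_shares() only notes that a weight of 0 or 1 causes "arithmetics problems". One concrete way very small weights break integer scheduler math is that the weight tends to end up as a divisor when runtime is scaled, where 0 faults outright and 1 leaves no headroom for rounding; the snippet below is a hypothetical worked example of that, not the kernel's actual formula.

#include <stdio.h>

/*
 * Hypothetical CFS-style scaling: runtime is weighted as
 * delta * NICE_0_LOAD / weight, so the weight is a divisor.
 */
static unsigned long long scale(unsigned long long delta, unsigned long weight)
{
	const unsigned long long nice_0_load = 1024;

	return delta * nice_0_load / weight;
}

int main(void)
{
	unsigned long long delta = 4000000;	/* 4 ms of runtime, made up */

	printf("weight 1024 -> %llu\n", scale(delta, 1024));
	printf("weight    2 -> %llu\n", scale(delta, 2));
	/* scale(delta, 0) would divide by zero; hence the clamp to >= 2. */
	return 0;
}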
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index d30467b47ddd..80fbbfc04290 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -31,9 +31,9 @@
 /*
  * Ease the printing of nsec fields:
  */
-static long long nsec_high(long long nsec)
+static long long nsec_high(unsigned long long nsec)
 {
-	if (nsec < 0) {
+	if ((long long)nsec < 0) {
 		nsec = -nsec;
 		do_div(nsec, 1000000);
 		return -nsec;
@@ -43,9 +43,9 @@ static long long nsec_high(long long nsec)
 	return nsec;
 }
 
-static unsigned long nsec_low(long long nsec)
+static unsigned long nsec_low(unsigned long long nsec)
 {
-	if (nsec < 0)
+	if ((long long)nsec < 0)
 		nsec = -nsec;
 
 	return do_div(nsec, 1000000);
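
nsec_high()/nsec_low() now take an unsigned long long and test the sign through a cast because the kernel's do_div() operates on an unsigned 64-bit dividend (it divides in place and hands back the remainder), so the argument type has to match even though the value is logically signed. A standalone sketch of the same millisecond split, using plain 64-bit division in place of do_div():

#include <stdio.h>

/* Millisecond part of a signed nanosecond value (sign preserved). */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		return -(long long)(nsec / 1000000);
	}
	return nsec / 1000000;
}

/* Sub-millisecond remainder, always reported as a positive value. */
static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;
	return nsec % 1000000;
}

int main(void)
{
	long long t = -1234567891LL;	/* about -1.23 s, in ns */

	printf("%lld.%06lu\n", nsec_high(t), nsec_low(t));
	return 0;
}

With the example value this prints "-1234.567891": the sign lives in the high (millisecond) part and the remainder is always a positive six-digit field, which is how the sched_debug output formats these fields.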
diff --git a/kernel/timer.c b/kernel/timer.c
index d4527dcef1af..2a00c22203f3 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -978,7 +978,7 @@ asmlinkage long sys_getppid(void)
 	int pid;
 
 	rcu_read_lock();
-	pid = task_ppid_nr_ns(current, current->nsproxy->pid_ns);
+	pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns);
 	rcu_read_unlock();
 
 	return pid;
@@ -1289,7 +1289,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
 	}
 }
 
-static void __devinit migrate_timers(int cpu)
+static void __cpuinit migrate_timers(int cpu)
 {
 	tvec_base_t *old_base;
 	tvec_base_t *new_base;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 52d5e7c9a8e6..8db0b597509e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -722,7 +722,8 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 struct workqueue_struct *__create_workqueue_key(const char *name,
 						int singlethread,
 						int freezeable,
-						struct lock_class_key *key)
+						struct lock_class_key *key,
+						const char *lock_name)
 {
 	struct workqueue_struct *wq;
 	struct cpu_workqueue_struct *cwq;
@@ -739,7 +740,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 	}
 
 	wq->name = name;
-	lockdep_init_map(&wq->lockdep_map, name, key, 0);
+	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 	wq->singlethread = singlethread;
 	wq->freezeable = freezeable;
 	INIT_LIST_HEAD(&wq->list);
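
The workqueue.c change threads a separate lock_name string through to lockdep_init_map(), so the workqueue's lockdep class can carry a name fixed at the creation call site rather than the runtime wq->name pointer. Such a string typically comes from a wrapper macro that stringifies its argument next to a per-call-site static key; the sketch below shows that macro pattern with hypothetical names (it is not the kernel's actual __create_workqueue() wrapper).

#include <stdio.h>

struct lock_key { int dummy; };

/* The "real" constructor takes both the key and a human-readable name. */
static void wq_create(const char *name, struct lock_key *key,
		      const char *lock_name)
{
	printf("workqueue %s: lock class %p named \"%s\"\n",
	       name, (void *)key, lock_name);
}

/*
 * One static key per call site, plus a stringified name for lockdep
 * reports: a sketch of the idea behind the extra lock_name argument.
 */
#define CREATE_WQ(name)						\
	do {							\
		static struct lock_key __key;			\
		wq_create(name, &__key, #name);			\
	} while (0)

int main(void)
{
	const char *kblockd = "kblockd";

	CREATE_WQ(kblockd);	/* lockdep name becomes the token "kblockd" */
	return 0;
}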