Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c               24
-rw-r--r--  kernel/fork.c               4
-rw-r--r--  kernel/irq/chip.c          33
-rw-r--r--  kernel/irq/proc.c           2
-rw-r--r--  kernel/lockdep.c           13
-rw-r--r--  kernel/module.c            94
-rw-r--r--  kernel/posix-cpu-timers.c  27
-rw-r--r--  kernel/power/disk.c         8
-rw-r--r--  kernel/power/swap.c         3
-rw-r--r--  kernel/power/user.c         8
-rw-r--r--  kernel/printk.c            11
-rw-r--r--  kernel/profile.c            2
-rw-r--r--  kernel/sched.c              6
-rw-r--r--  kernel/sys_ni.c             1
-rw-r--r--  kernel/time/jiffies.c       2
-rw-r--r--  kernel/workqueue.c          7
16 files changed, 135 insertions, 110 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 32c96628463e..27dd3ee47099 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -19,7 +19,7 @@
 static DEFINE_MUTEX(cpu_add_remove_lock);
 static DEFINE_MUTEX(cpu_bitmask_lock);
 
-static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
+static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
@@ -68,7 +68,11 @@ EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 /* Need to know about CPUs going up/down? */
 int __cpuinit register_cpu_notifier(struct notifier_block *nb)
 {
-	return blocking_notifier_chain_register(&cpu_chain, nb);
+	int ret;
+	mutex_lock(&cpu_add_remove_lock);
+	ret = raw_notifier_chain_register(&cpu_chain, nb);
+	mutex_unlock(&cpu_add_remove_lock);
+	return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -77,7 +81,9 @@ EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	blocking_notifier_chain_unregister(&cpu_chain, nb);
+	mutex_lock(&cpu_add_remove_lock);
+	raw_notifier_chain_unregister(&cpu_chain, nb);
+	mutex_unlock(&cpu_add_remove_lock);
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -126,7 +132,7 @@ static int _cpu_down(unsigned int cpu)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
-	err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+	err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
 		printk("%s: attempt to take down CPU %u failed\n",
@@ -146,7 +152,7 @@ static int _cpu_down(unsigned int cpu)
 
 	if (IS_ERR(p)) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
-		if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
 				(void *)(long)cpu) == NOTIFY_BAD)
 			BUG();
 
@@ -169,7 +175,7 @@ static int _cpu_down(unsigned int cpu)
 	put_cpu();
 
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
+	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD,
 			(void *)(long)cpu) == NOTIFY_BAD)
 		BUG();
 
@@ -206,7 +212,7 @@ static int __devinit _cpu_up(unsigned int cpu)
 	if (cpu_online(cpu) || !cpu_present(cpu))
 		return -EINVAL;
 
-	ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+	ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__FUNCTION__, cpu);
@@ -223,11 +229,11 @@ static int __devinit _cpu_up(unsigned int cpu)
 	BUG_ON(!cpu_online(cpu));
 
 	/* Now call notifier in preparation. */
-	blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
 
 out_notify:
 	if (ret != 0)
-		blocking_notifier_call_chain(&cpu_chain,
+		raw_notifier_call_chain(&cpu_chain,
 				CPU_UP_CANCELED, hcpu);
 
 	return ret;
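
Note on the conversion above: a raw notifier chain, unlike a blocking one, performs no locking of its own, so the caller must serialize every register, unregister, and call itself — here via cpu_add_remove_lock. A minimal sketch of the resulting pattern (illustrative only; the names mirror kernel/cpu.c but this is not the kernel code):

/* Caller-locked raw notifier: the chain itself takes no locks. */
static RAW_NOTIFIER_HEAD(example_chain);
static DEFINE_MUTEX(example_lock);

int example_register(struct notifier_block *nb)
{
        int ret;

        mutex_lock(&example_lock);      /* all serialization is the caller's job */
        ret = raw_notifier_chain_register(&example_chain, nb);
        mutex_unlock(&example_lock);
        return ret;
}
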
diff --git a/kernel/fork.c b/kernel/fork.c
index 7dc6140baac6..29ebb30850ed 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -984,6 +984,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+	rt_mutex_init_task(p);
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
@@ -1088,8 +1090,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->lockdep_recursion = 0;
 #endif
 
-	rt_mutex_init_task(p);
-
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 11c99697acfe..2d0dc3efe813 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -499,7 +499,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 #endif /* CONFIG_SMP */
 
 void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained)
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+		  const char *name)
 {
 	struct irq_desc *desc;
 	unsigned long flags;
@@ -540,6 +541,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained)
 		desc->depth = 1;
 	}
 	desc->handle_irq = handle;
+	desc->name = name;
 
 	if (handle != handle_bad_irq && is_chained) {
 		desc->status &= ~IRQ_DISABLED;
@@ -555,30 +557,13 @@ set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
 		 irq_flow_handler_t handle)
 {
 	set_irq_chip(irq, chip);
-	__set_irq_handler(irq, handle, 0);
+	__set_irq_handler(irq, handle, 0, NULL);
 }
 
-/*
- * Get a descriptive string for the highlevel handler, for
- * /proc/interrupts output:
- */
-const char *
-handle_irq_name(irq_flow_handler_t handle)
+void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+			      irq_flow_handler_t handle, const char *name)
 {
-	if (handle == handle_level_irq)
-		return "level   ";
-	if (handle == handle_fasteoi_irq)
-		return "fasteoi";
-	if (handle == handle_edge_irq)
-		return "edge    ";
-	if (handle == handle_simple_irq)
-		return "simple  ";
-#ifdef CONFIG_SMP
-	if (handle == handle_percpu_irq)
-		return "percpu  ";
-#endif
-	if (handle == handle_bad_irq)
-		return "bad     ";
-
-	return NULL;
+	set_irq_chip(irq, chip);
+	__set_irq_handler(irq, handle, 0, name);
 }
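
With handle_irq_name() gone, the /proc/interrupts label is no longer guessed from the handler pointer; callers supply it through the new name parameter. A hypothetical caller of the new helper (illustrative, not part of this diff):

/* Install an edge-type flow handler and label it for /proc/interrupts. */
set_irq_chip_and_handler_name(irq, &my_irq_chip, handle_edge_irq, "edge");
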
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 607c7809ad01..9a352667007c 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -57,7 +57,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity)
 		return -EIO;
 
-	err = cpumask_parse(buffer, count, new_value);
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
 		return err;
 
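
The cpumask_parse() -> cpumask_parse_user() rename (applied again in kernel/profile.c below) makes explicit that the buffer handed to a /proc write handler is a user-space pointer that must be copied in before parsing. Conceptually the helper does something like this — a simplified sketch, not the kernel's actual definition, with an assumed scratch-buffer size:

/* Sketch: copy the user buffer into kernel space, then parse it as a
 * comma-separated hex cpumask. */
static int cpumask_parse_user_sketch(const char __user *ubuf, unsigned int ulen,
                                     cpumask_t *dst)
{
        char kbuf[NR_CPUS/4 + 8];               /* assumed scratch size */

        if (ulen >= sizeof(kbuf))
                return -EINVAL;
        if (copy_from_user(kbuf, ubuf, ulen))   /* the step cpumask_parse() skipped */
                return -EFAULT;
        kbuf[ulen] = '\0';
        /* ... parse kbuf into *dst ... */
        return 0;
}
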
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ba7156ac70c1..b739be2a6dc9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -575,6 +575,8 @@ static noinline int print_circular_bug_tail(void)
 	return 0;
 }
 
+#define RECURSION_LIMIT 40
+
 static int noinline print_infinite_recursion_bug(void)
 {
 	__raw_spin_unlock(&hash_lock);
@@ -595,7 +597,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 	/*
 	 * Check this lock's dependency list:
@@ -645,7 +647,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
@@ -684,7 +686,7 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
@@ -1114,8 +1116,6 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
-
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1153,8 +1153,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 * (or spin_lock_init()) call - which acts as the key. For static
 	 * locks we use the lock object itself as the key.
 	 */
-	if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
-		__error_too_big_MAX_LOCKDEP_SUBCLASSES();
+	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
 
 	key = lock->key->subkeys + subclass;
 
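
BUILD_BUG_ON() gives the same build-time guarantee as the removed __error_too_big_MAX_LOCKDEP_SUBCLASSES() trick — both refuse to build when the condition is true — but it fails at compile time rather than link time and needs no dangling extern declaration. The classic definition from include/linux/kernel.h of this era:

/* Force a compilation error if condition is true: the array size
 * goes negative when the condition holds. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
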
diff --git a/kernel/module.c b/kernel/module.c
index 7f60e782de1e..67009bd56c52 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -87,6 +87,12 @@ static inline int strong_try_module_get(struct module *mod)
 	return try_module_get(mod);
 }
 
+static inline void add_taint_module(struct module *mod, unsigned flag)
+{
+	add_taint(flag);
+	mod->taints |= flag;
+}
+
 /* A thread that wants to hold a reference to a module only while it
  * is running can call ths to safely exit.
  * nfsd and lockd use this.
@@ -847,12 +853,10 @@ static int check_version(Elf_Shdr *sechdrs,
 		return 0;
 	}
 	/* Not in module's version table.  OK, but that taints the kernel. */
-	if (!(tainted & TAINT_FORCED_MODULE)) {
+	if (!(tainted & TAINT_FORCED_MODULE))
 		printk("%s: no version for \"%s\" found: kernel tainted.\n",
 		       mod->name, symname);
-		add_taint(TAINT_FORCED_MODULE);
-		mod->taints |= TAINT_FORCED_MODULE;
-	}
+	add_taint_module(mod, TAINT_FORCED_MODULE);
 	return 1;
 }
 
@@ -910,7 +914,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
 	unsigned long ret;
 	const unsigned long *crc;
 
-	ret = __find_symbol(name, &owner, &crc, mod->license_gplok);
+	ret = __find_symbol(name, &owner, &crc,
+			!(mod->taints & TAINT_PROPRIETARY_MODULE));
 	if (ret) {
 		/* use_module can fail due to OOM, or module unloading */
 		if (!check_version(sechdrs, versindex, name, mod, crc) ||
@@ -1335,12 +1340,11 @@ static void set_license(struct module *mod, const char *license)
 	if (!license)
 		license = "unspecified";
 
-	mod->license_gplok = license_is_gpl_compatible(license);
-	if (!mod->license_gplok && !(tainted & TAINT_PROPRIETARY_MODULE)) {
-		printk(KERN_WARNING "%s: module license '%s' taints kernel.\n",
-			mod->name, license);
-		add_taint(TAINT_PROPRIETARY_MODULE);
-		mod->taints |= TAINT_PROPRIETARY_MODULE;
+	if (!license_is_gpl_compatible(license)) {
+		if (!(tainted & TAINT_PROPRIETARY_MODULE))
+			printk(KERN_WARNING "%s: module license '%s' taints"
+				"kernel.\n", mod->name, license);
+		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 	}
 }
 
@@ -1619,8 +1623,7 @@ static struct module *load_module(void __user *umod,
 	modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
 	/* This is allowed: modprobe --force will invalidate it. */
 	if (!modmagic) {
-		add_taint(TAINT_FORCED_MODULE);
-		mod->taints |= TAINT_FORCED_MODULE;
+		add_taint_module(mod, TAINT_FORCED_MODULE);
 		printk(KERN_WARNING "%s: no version magic, tainting kernel.\n",
 		       mod->name);
 	} else if (!same_magic(modmagic, vermagic)) {
@@ -1714,14 +1717,10 @@ static struct module *load_module(void __user *umod,
 	/* Set up license info based on the info section */
 	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
 
-	if (strcmp(mod->name, "ndiswrapper") == 0) {
-		add_taint(TAINT_PROPRIETARY_MODULE);
-		mod->taints |= TAINT_PROPRIETARY_MODULE;
-	}
-	if (strcmp(mod->name, "driverloader") == 0) {
-		add_taint(TAINT_PROPRIETARY_MODULE);
-		mod->taints |= TAINT_PROPRIETARY_MODULE;
-	}
+	if (strcmp(mod->name, "ndiswrapper") == 0)
+		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+	if (strcmp(mod->name, "driverloader") == 0)
+		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 
 	/* Set up MODINFO_ATTR fields */
 	setup_modinfo(mod, sechdrs, infoindex);
@@ -1766,8 +1765,7 @@ static struct module *load_module(void __user *umod,
 	    (mod->num_unused_gpl_syms && !unusedgplcrcindex)) {
 		printk(KERN_WARNING "%s: No versions for exported symbols."
 		       " Tainting kernel.\n", mod->name);
-		add_taint(TAINT_FORCED_MODULE);
-		mod->taints |= TAINT_FORCED_MODULE;
+		add_taint_module(mod, TAINT_FORCED_MODULE);
 	}
 #endif
 
@@ -2132,9 +2130,33 @@ static void m_stop(struct seq_file *m, void *p)
 	mutex_unlock(&module_mutex);
 }
 
+static char *taint_flags(unsigned int taints, char *buf)
+{
+	int bx = 0;
+
+	if (taints) {
+		buf[bx++] = '(';
+		if (taints & TAINT_PROPRIETARY_MODULE)
+			buf[bx++] = 'P';
+		if (taints & TAINT_FORCED_MODULE)
+			buf[bx++] = 'F';
+		/*
+		 * TAINT_FORCED_RMMOD: could be added.
+		 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
+		 * apply to modules.
+		 */
+		buf[bx++] = ')';
+	}
+	buf[bx] = '\0';
+
+	return buf;
+}
+
 static int m_show(struct seq_file *m, void *p)
 {
 	struct module *mod = list_entry(p, struct module, list);
+	char buf[8];
+
 	seq_printf(m, "%s %lu",
 		   mod->name, mod->init_size + mod->core_size);
 	print_unload_info(m, mod);
@@ -2147,6 +2169,10 @@ static int m_show(struct seq_file *m, void *p)
 	/* Used by oprofile and other similar tools. */
 	seq_printf(m, " 0x%p", mod->module_core);
 
+	/* Taints info */
+	if (mod->taints)
+		seq_printf(m, " %s", taint_flags(mod->taints, buf));
+
 	seq_printf(m, "\n");
 	return 0;
 }
@@ -2235,28 +2261,6 @@ struct module *module_text_address(unsigned long addr)
 	return mod;
 }
 
-static char *taint_flags(unsigned int taints, char *buf)
-{
-	*buf = '\0';
-	if (taints) {
-		int bx;
-
-		buf[0] = '(';
-		bx = 1;
-		if (taints & TAINT_PROPRIETARY_MODULE)
-			buf[bx++] = 'P';
-		if (taints & TAINT_FORCED_MODULE)
-			buf[bx++] = 'F';
-		/*
-		 * TAINT_FORCED_RMMOD: could be added.
-		 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
-		 * apply to modules.
-		 */
-		buf[bx] = ')';
-	}
-	return buf;
-}
-
 /* Don't grab lock, we're oopsing. */
 void print_modules(void)
 {
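
Moving taint_flags() above m_show() lets /proc/modules report per-module taint in a trailing column. For a proprietary, force-loaded module, a line would now look roughly like this (illustrative output, not taken from the patch):

somedriver 45056 0 - Live 0xf8a1e000 (PF)

The 8-byte buf comfortably holds '(' plus the at most two flag characters, ')' and the NUL; add_taint_module() keeps the global tainted mask and the per-module mask in sync from a single call site.
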
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 479b16b44f79..7c3e1e6dfb5b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -88,6 +88,19 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
 }
 
 /*
+ * Divide and limit the result to res >= 1
+ *
+ * This is necessary to prevent signal delivery starvation, when the result of
+ * the division would be rounded down to 0.
+ */
+static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
+{
+	cputime_t res = cputime_div(time, div);
+
+	return max_t(cputime_t, res, 1);
+}
+
+/*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
@@ -483,8 +496,8 @@ static void process_timer_rebalance(struct task_struct *p,
 		BUG();
 		break;
 	case CPUCLOCK_PROF:
-		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-				   nthreads);
+		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+				       nthreads);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ticks = cputime_add(prof_ticks(t), left);
@@ -498,8 +511,8 @@ static void process_timer_rebalance(struct task_struct *p,
 		} while (t != p);
 		break;
 	case CPUCLOCK_VIRT:
-		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-				   nthreads);
+		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+				       nthreads);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ticks = cputime_add(virt_ticks(t), left);
@@ -515,6 +528,7 @@ static void process_timer_rebalance(struct task_struct *p,
 	case CPUCLOCK_SCHED:
 		nsleft = expires.sched - val.sched;
 		do_div(nsleft, nthreads);
+		nsleft = max_t(unsigned long long, nsleft, 1);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ns = t->sched_time + nsleft;
@@ -1159,12 +1173,13 @@ static void check_process_timers(struct task_struct *tsk,
 
 		prof_left = cputime_sub(prof_expires, utime);
 		prof_left = cputime_sub(prof_left, stime);
-		prof_left = cputime_div(prof_left, nthreads);
+		prof_left = cputime_div_non_zero(prof_left, nthreads);
 		virt_left = cputime_sub(virt_expires, utime);
-		virt_left = cputime_div(virt_left, nthreads);
+		virt_left = cputime_div_non_zero(virt_left, nthreads);
 		if (sched_expires) {
 			sched_left = sched_expires - sched_time;
 			do_div(sched_left, nthreads);
+			sched_left = max_t(unsigned long long, sched_left, 1);
 		} else {
 			sched_left = 0;
 		}
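
The failure mode these clamps fix is easy to see with numbers: if 100 ticks of budget remain on a process-wide timer shared by 128 threads, integer division gives 0 per thread, each per-thread expiry is set to "now", and the timer re-fires immediately in an endless storm of signals. Illustration (assuming the plain-integer cputime_t representation):

cputime_t left;

left = cputime_div(100, 128);           /* == 0: per-thread expiry is now   */
left = cputime_div_non_zero(100, 128);  /* == 1: expiry always moves forward */
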
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index d72234942798..d3a158a60312 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -18,6 +18,7 @@
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/pm.h>
+#include <linux/console.h>
 #include <linux/cpu.h>
 
 #include "power.h"
@@ -119,8 +120,10 @@ int pm_suspend_disk(void)
 	if (error)
 		return error;
 
+	suspend_console();
 	error = device_suspend(PMSG_FREEZE);
 	if (error) {
+		resume_console();
 		printk("Some devices failed to suspend\n");
 		unprepare_processes();
 		return error;
@@ -133,6 +136,7 @@ int pm_suspend_disk(void)
 
 	if (in_suspend) {
 		device_resume();
+		resume_console();
 		pr_debug("PM: writing image.\n");
 		error = swsusp_write();
 		if (!error)
@@ -148,6 +152,7 @@ int pm_suspend_disk(void)
 	swsusp_free();
  Done:
 	device_resume();
+	resume_console();
 	unprepare_processes();
 	return error;
 }
@@ -212,7 +217,9 @@ static int software_resume(void)
 
 	pr_debug("PM: Preparing devices for restore.\n");
 
+	suspend_console();
 	if ((error = device_suspend(PMSG_PRETHAW))) {
+		resume_console();
 		printk("Some devices failed to suspend\n");
 		swsusp_free();
 		goto Thaw;
@@ -224,6 +231,7 @@ static int software_resume(void)
 	swsusp_resume();
 	pr_debug("PM: Restore failed, recovering.n");
 	device_resume();
+	resume_console();
  Thaw:
 	unprepare_processes();
  Done:
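
The console calls added above (and in kernel/power/user.c below) follow a strict pairing discipline: once suspend_console() silences the console ahead of device_suspend(), every exit path — error or success — must reach resume_console(), or the machine comes back with a dead console. The skeleton of the pattern (illustrative):

        suspend_console();
        error = device_suspend(PMSG_FREEZE);
        if (error) {
                resume_console();       /* failure path */
                return error;
        }
        /* ... suspend/resume work ... */
        device_resume();
        resume_console();               /* success path */
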
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 9b2ee5344dee..1a3b0dd2c3fc 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -425,7 +425,8 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 		bio_set_pages_dirty(bio);
 		bio_put(bio);
 	} else {
-		get_page(page);
+		if (rw == READ)
+			get_page(page);	/* These pages are freed later */
 		bio->bi_private = *bio_chain;
 		*bio_chain = bio;
 		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 93b5dd283dea..d991d3b0e5a4 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -19,6 +19,7 @@
 #include <linux/swapops.h>
 #include <linux/pm.h>
 #include <linux/fs.h>
+#include <linux/console.h>
 #include <linux/cpu.h>
 
 #include <asm/uaccess.h>
@@ -173,12 +174,14 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
 		/* Free memory before shutting down devices. */
 		error = swsusp_shrink_memory();
 		if (!error) {
+			suspend_console();
 			error = device_suspend(PMSG_FREEZE);
 			if (!error) {
 				in_suspend = 1;
 				error = swsusp_suspend();
 				device_resume();
 			}
+			resume_console();
 		}
 		up(&pm_sem);
 		if (!error)
@@ -196,11 +199,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
 		snapshot_free_unused_memory(&data->handle);
 		down(&pm_sem);
 		pm_prepare_console();
+		suspend_console();
 		error = device_suspend(PMSG_PRETHAW);
 		if (!error) {
 			error = swsusp_resume();
 			device_resume();
 		}
+		resume_console();
 		pm_restore_console();
 		up(&pm_sem);
 		break;
@@ -289,6 +294,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
 		}
 
 		/* Put devices to sleep */
+		suspend_console();
 		error = device_suspend(PMSG_SUSPEND);
 		if (error) {
 			printk(KERN_ERR "Failed to suspend some devices.\n");
@@ -299,7 +305,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
 			/* Wake up devices */
 			device_resume();
 		}
-
+		resume_console();
 		if (pm_ops->finish)
 			pm_ops->finish(PM_SUSPEND_MEM);
 
diff --git a/kernel/printk.c b/kernel/printk.c
index 771f5e861bcd..f7d427ef5038 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -820,15 +820,8 @@ void release_console_sem(void)
 	console_locked = 0;
 	up(&console_sem);
 	spin_unlock_irqrestore(&logbuf_lock, flags);
-	if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) {
-		/*
-		 * If we printk from within the lock dependency code,
-		 * from within the scheduler code, then do not lock
-		 * up due to self-recursion:
-		 */
-		if (!lockdep_internal())
-			wake_up_interruptible(&log_wait);
-	}
+	if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait))
+		wake_up_interruptible(&log_wait);
 }
 EXPORT_SYMBOL(release_console_sem);
 
diff --git a/kernel/profile.c b/kernel/profile.c
index 857300a2afec..f940b462eec9 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -399,7 +399,7 @@ static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffe
 	unsigned long full_count = count, err;
 	cpumask_t new_value;
 
-	err = cpumask_parse(buffer, count, new_value);
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
 		return err;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 53608a59d6e3..094b5687eef6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1822,14 +1822,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm = next->mm;
 	struct mm_struct *oldmm = prev->active_mm;
 
-	if (unlikely(!mm)) {
+	if (!mm) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm(oldmm, mm, next);
 
-	if (unlikely(!prev->mm)) {
+	if (!prev->mm) {
 		prev->active_mm = NULL;
 		WARN_ON(rq->prev_mm);
 		rq->prev_mm = oldmm;
@@ -3491,7 +3491,7 @@ asmlinkage void __sched preempt_schedule(void)
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task.  Just return..
 	 */
-	if (unlikely(ti->preempt_count || irqs_disabled()))
+	if (likely(ti->preempt_count || irqs_disabled()))
 		return;
 
 need_resched:
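
The sched.c hunks are pure branch-prediction fixes: in context_switch() kernel threads (!mm) are too common for unlikely() to be accurate, and in preempt_schedule() bailing out early is in fact the expected case, so the hint flips to likely(). Both macros are thin wrappers over GCC's __builtin_expect (the standard definitions from include/linux/compiler.h):

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
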
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 7a3b2e75f040..0e53314b14de 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -49,6 +49,7 @@ cond_syscall(compat_sys_get_robust_list);
 cond_syscall(sys_epoll_create);
 cond_syscall(sys_epoll_ctl);
 cond_syscall(sys_epoll_wait);
+cond_syscall(sys_epoll_pwait);
 cond_syscall(sys_semget);
 cond_syscall(sys_semop);
 cond_syscall(sys_semtimedop);
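
cond_syscall() makes a syscall table entry fall back to sys_ni_syscall() (which simply returns -ENOSYS) when the real implementation is compiled out, so kernels built without epoll still link once sys_epoll_pwait is referenced. The generic definition of this era was a weak-symbol alias, roughly (sketch of the include/linux/syscalls.h definition):

#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
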
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 126bb30c4afe..a99b2a6e6a07 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -57,7 +57,7 @@ static cycle_t jiffies_read(void)
 
 struct clocksource clocksource_jiffies = {
 	.name		= "jiffies",
-	.rating		= 0, /* lowest rating*/
+	.rating		= 1, /* lowest valid rating*/
 	.read		= jiffies_read,
 	.mask		= 0xffffffff, /*32bits*/
 	.mult		= NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
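
Rating 0 fell outside the documented clocksource scale, where 1-99 already means "unfit for real use"; bumping jiffies to 1 makes it the lowest valid rating, so any real hardware clocksource outranks it. The bands, paraphrased from the include/linux/clocksource.h documentation:

/*
 *   1-99:  unfit for real use; only available during boot or for testing
 * 100-199: functional but not desired (e.g. slow to access)
 * 200-299: a correct and usable clocksource
 * 300-399: reasonably fast and accurate
 * 400-499: perfect; the ideal clocksource
 */
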
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cfc737bffe6d..3df9bfc7ff78 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -28,6 +28,7 @@
 #include <linux/notifier.h>
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
+#include <linux/mempolicy.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -245,6 +246,12 @@ static int worker_thread(void *__cwq)
 	sigprocmask(SIG_BLOCK, &blocked, NULL);
 	flush_signals(current);
 
+	/*
+	 * We inherited MPOL_INTERLEAVE from the booting kernel.
+	 * Set MPOL_DEFAULT to insure node local allocations.
+	 */
+	numa_default_policy();
+
 	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
 	sa.sa.sa_handler = SIG_IGN;
 	sa.sa.sa_flags = 0;
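
Kernel threads forked during boot inherit MPOL_INTERLEAVE, the policy init uses so early allocations spread across NUMA nodes; resetting to MPOL_DEFAULT makes a long-lived worker allocate node-locally instead. The same pattern applies to any long-lived kernel thread (illustrative sketch, not from this diff):

static int my_service_thread(void *unused)
{
        numa_default_policy();  /* drop inherited MPOL_INTERLEAVE */

        while (!kthread_should_stop()) {
                /* ... service loop with node-local allocations ... */
        }
        return 0;
}
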