Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                    2
-rw-r--r--  kernel/cpuset.c                 4
-rw-r--r--  kernel/exec_domain.c            2
-rw-r--r--  kernel/irq/manage.c            39
-rw-r--r--  kernel/kmod.c                   2
-rw-r--r--  kernel/module.c               336
-rw-r--r--  kernel/power/Kconfig            2
-rw-r--r--  kernel/rcuclassic.c             2
-rw-r--r--  kernel/rcupreempt.c            10
-rw-r--r--  kernel/rtmutex-tester.c         7
-rw-r--r--  kernel/sched.c                 44
-rw-r--r--  kernel/sched_fair.c             2
-rw-r--r--  kernel/sched_rt.c               4
-rw-r--r--  kernel/softlockup.c            45
-rw-r--r--  kernel/stop_machine.c           3
-rw-r--r--  kernel/sys_ni.c                 1
-rw-r--r--  kernel/sysctl.c                24
-rw-r--r--  kernel/taskstats.c              4
-rw-r--r--  kernel/time/clocksource.c      12
-rw-r--r--  kernel/time/tick-broadcast.c    3
-rw-r--r--  kernel/time/tick-common.c      14
-rw-r--r--  kernel/time/tick-sched.c        4
-rw-r--r--  kernel/trace/trace_sysprof.c    4
-rw-r--r--  kernel/workqueue.c              6
24 files changed, 359 insertions, 217 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 033603c1d7c3..2cc409ce0a8f 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -441,7 +441,7 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask(cpu, frozen_cpus) {
+	for_each_cpu_mask_nr(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3c3ef02f65f1..d5738910c34c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -679,7 +679,9 @@ restart:
 			if (apn == b->pn) {
 				cpus_or(*dp, *dp, b->cpus_allowed);
 				b->pn = -1;
-				update_domain_attr(dattr, b);
+				if (dattr)
+					update_domain_attr(dattr
+							   + nslot, b);
 			}
 		}
 		nslot++;
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index a9e6bad9f706..c1ef192aa655 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -65,7 +65,7 @@ lookup_exec_domain(u_long personality)
 		goto out;
 	}
 
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
 	read_unlock(&exec_domains_lock);
 	request_module("personality-%ld", pers);
 	read_lock(&exec_domains_lock);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 77a51be36010..3cfc0fefb5ee 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -217,6 +217,17 @@ void enable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(enable_irq);
 
+int set_irq_wake_real(unsigned int irq, unsigned int on)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int ret = -ENXIO;
+
+	if (desc->chip->set_wake)
+		ret = desc->chip->set_wake(irq, on);
+
+	return ret;
+}
+
 /**
  * set_irq_wake - control irq power management wakeup
  * @irq: interrupt to control
@@ -233,30 +244,34 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	unsigned long flags;
-	int ret = -ENXIO;
-	int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
+	int ret = 0;
 
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
 	if (on) {
-		if (desc->wake_depth++ == 0)
-			desc->status |= IRQ_WAKEUP;
-		else
-			set_wake = NULL;
+		if (desc->wake_depth++ == 0) {
+			ret = set_irq_wake_real(irq, on);
+			if (ret)
+				desc->wake_depth = 0;
+			else
+				desc->status |= IRQ_WAKEUP;
+		}
 	} else {
 		if (desc->wake_depth == 0) {
 			printk(KERN_WARNING "Unbalanced IRQ %d "
 			       "wake disable\n", irq);
 			WARN_ON(1);
-		} else if (--desc->wake_depth == 0)
-			desc->status &= ~IRQ_WAKEUP;
-		else
-			set_wake = NULL;
+		} else if (--desc->wake_depth == 0) {
+			ret = set_irq_wake_real(irq, on);
+			if (ret)
+				desc->wake_depth = 1;
+			else
+				desc->status &= ~IRQ_WAKEUP;
+		}
 	}
-	if (set_wake)
-		ret = desc->chip->set_wake(irq, on);
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
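(Aside: the set_irq_wake() rework above makes wake_depth authoritative; only the 0 -> 1 and 1 -> 0 transitions reach the chip's ->set_wake() hook, and a failed call rolls the depth back so enables and disables stay balanced. A minimal sketch of a caller, hypothetical driver code rather than anything in this patch, with example_irq assumed to be a valid wake-capable interrupt:)

#include <linux/interrupt.h>
#include <linux/irq.h>

static unsigned int example_irq;	/* assumed wake-capable IRQ */

static int example_suspend(void)
{
	/* First enable arms the hardware; nested enables only count. */
	int err = set_irq_wake(example_irq, 1);

	if (err)
		return err;	/* chip refused: wake_depth was reset to 0 */
	return 0;
}

static int example_resume(void)
{
	/* Must balance the enable; the last disable disarms the hardware. */
	return set_irq_wake(example_irq, 0);
}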
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 8df97d3dfda8..90d7af1c1655 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -42,7 +42,7 @@ extern int max_threads;
 
 static struct workqueue_struct *khelper_wq;
 
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
 
 /*
 	modprobe_path is set via /proc/sys.
diff --git a/kernel/module.c b/kernel/module.c
index 5f80478b746d..d8b5605132a0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -70,6 +70,9 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
+/* Bounds of module allocation, for speeding __module_text_address */
+static unsigned long module_addr_min = -1UL, module_addr_max = 0;
+
 int register_module_notifier(struct notifier_block * nb)
 {
 	return blocking_notifier_chain_register(&module_notify_list, nb);
@@ -134,17 +137,19 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const struct kernel_symbol __start___ksymtab_unused[];
-extern const struct kernel_symbol __stop___ksymtab_unused[];
-extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
-extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
 extern const unsigned long __start___kcrctab[];
 extern const unsigned long __start___kcrctab_gpl[];
 extern const unsigned long __start___kcrctab_gpl_future[];
+#ifdef CONFIG_UNUSED_SYMBOLS
+extern const struct kernel_symbol __start___ksymtab_unused[];
+extern const struct kernel_symbol __stop___ksymtab_unused[];
+extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
+extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
 extern const unsigned long __start___kcrctab_unused[];
 extern const unsigned long __start___kcrctab_unused_gpl[];
+#endif
 
 #ifndef CONFIG_MODVERSIONS
 #define symversion(base, idx) NULL
@@ -152,156 +157,186 @@ extern const unsigned long __start___kcrctab_unused_gpl[];
 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
 #endif
 
-/* lookup symbol in given range of kernel_symbols */
-static const struct kernel_symbol *lookup_symbol(const char *name,
-						 const struct kernel_symbol *start,
-						 const struct kernel_symbol *stop)
-{
-	const struct kernel_symbol *ks = start;
-	for (; ks < stop; ks++)
-		if (strcmp(ks->name, name) == 0)
-			return ks;
-	return NULL;
-}
-
-static bool always_ok(bool gplok, bool warn, const char *name)
-{
-	return true;
-}
-
-static bool printk_unused_warning(bool gplok, bool warn, const char *name)
-{
-	if (warn) {
-		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
-		       "however this module is using it.\n", name);
-		printk(KERN_WARNING
-		       "This symbol will go away in the future.\n");
-		printk(KERN_WARNING
-		       "Please evalute if this is the right api to use and if "
-		       "it really is, submit a report the linux kernel "
-		       "mailinglist together with submitting your code for "
-		       "inclusion.\n");
-	}
-	return true;
-}
-
-static bool gpl_only_unused_warning(bool gplok, bool warn, const char *name)
-{
-	if (!gplok)
-		return false;
-	return printk_unused_warning(gplok, warn, name);
-}
-
-static bool gpl_only(bool gplok, bool warn, const char *name)
-{
-	return gplok;
-}
-
-static bool warn_if_not_gpl(bool gplok, bool warn, const char *name)
-{
-	if (!gplok && warn) {
-		printk(KERN_WARNING "Symbol %s is being used "
-		       "by a non-GPL module, which will not "
-		       "be allowed in the future\n", name);
-		printk(KERN_WARNING "Please see the file "
-		       "Documentation/feature-removal-schedule.txt "
-		       "in the kernel source tree for more details.\n");
-	}
-	return true;
-}
-
 struct symsearch {
 	const struct kernel_symbol *start, *stop;
 	const unsigned long *crcs;
-	bool (*check)(bool gplok, bool warn, const char *name);
+	enum {
+		NOT_GPL_ONLY,
+		GPL_ONLY,
+		WILL_BE_GPL_ONLY,
+	} licence;
+	bool unused;
 };
 
-/* Look through this array of symbol tables for a symbol match which
- * passes the check function. */
-static const struct kernel_symbol *search_symarrays(const struct symsearch *arr,
-						    unsigned int num,
-						    const char *name,
-						    bool gplok,
-						    bool warn,
-						    const unsigned long **crc)
+static bool each_symbol_in_section(const struct symsearch *arr,
+				   unsigned int arrsize,
+				   struct module *owner,
+				   bool (*fn)(const struct symsearch *syms,
+					      struct module *owner,
+					      unsigned int symnum, void *data),
+				   void *data)
 {
-	unsigned int i;
-	const struct kernel_symbol *ks;
+	unsigned int i, j;
 
-	for (i = 0; i < num; i++) {
-		ks = lookup_symbol(name, arr[i].start, arr[i].stop);
-		if (!ks || !arr[i].check(gplok, warn, name))
-			continue;
-
-		if (crc)
-			*crc = symversion(arr[i].crcs, ks - arr[i].start);
-		return ks;
+	for (j = 0; j < arrsize; j++) {
+		for (i = 0; i < arr[j].stop - arr[j].start; i++)
+			if (fn(&arr[j], owner, i, data))
+				return true;
 	}
-	return NULL;
+
+	return false;
 }
 
-/* Find a symbol, return value, (optional) crc and (optional) module
- * which owns it */
-static unsigned long find_symbol(const char *name,
-				 struct module **owner,
-				 const unsigned long **crc,
-				 bool gplok,
-				 bool warn)
+/* Returns true as soon as fn returns true, otherwise false. */
+static bool each_symbol(bool (*fn)(const struct symsearch *arr,
+				   struct module *owner,
+				   unsigned int symnum, void *data),
+			void *data)
 {
 	struct module *mod;
-	const struct kernel_symbol *ks;
 	const struct symsearch arr[] = {
 		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
-		  always_ok },
+		  NOT_GPL_ONLY, false },
 		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
-		  __start___kcrctab_gpl, gpl_only },
+		  __start___kcrctab_gpl,
+		  GPL_ONLY, false },
 		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
-		  __start___kcrctab_gpl_future, warn_if_not_gpl },
+		  __start___kcrctab_gpl_future,
+		  WILL_BE_GPL_ONLY, false },
+#ifdef CONFIG_UNUSED_SYMBOLS
 		{ __start___ksymtab_unused, __stop___ksymtab_unused,
-		  __start___kcrctab_unused, printk_unused_warning },
+		  __start___kcrctab_unused,
+		  NOT_GPL_ONLY, true },
 		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
-		  __start___kcrctab_unused_gpl, gpl_only_unused_warning },
+		  __start___kcrctab_unused_gpl,
+		  GPL_ONLY, true },
+#endif
 	};
 
-	/* Core kernel first. */
-	ks = search_symarrays(arr, ARRAY_SIZE(arr), name, gplok, warn, crc);
-	if (ks) {
-		if (owner)
-			*owner = NULL;
-		return ks->value;
-	}
+	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
+		return true;
 
-	/* Now try modules. */
 	list_for_each_entry(mod, &modules, list) {
 		struct symsearch arr[] = {
 			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
-			  always_ok },
+			  NOT_GPL_ONLY, false },
 			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
-			  mod->gpl_crcs, gpl_only },
+			  mod->gpl_crcs,
+			  GPL_ONLY, false },
 			{ mod->gpl_future_syms,
 			  mod->gpl_future_syms + mod->num_gpl_future_syms,
-			  mod->gpl_future_crcs, warn_if_not_gpl },
+			  mod->gpl_future_crcs,
+			  WILL_BE_GPL_ONLY, false },
+#ifdef CONFIG_UNUSED_SYMBOLS
 			{ mod->unused_syms,
 			  mod->unused_syms + mod->num_unused_syms,
-			  mod->unused_crcs, printk_unused_warning },
+			  mod->unused_crcs,
+			  NOT_GPL_ONLY, true },
 			{ mod->unused_gpl_syms,
 			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
-			  mod->unused_gpl_crcs, gpl_only_unused_warning },
+			  mod->unused_gpl_crcs,
+			  GPL_ONLY, true },
+#endif
 		};
 
-		ks = search_symarrays(arr, ARRAY_SIZE(arr),
-				      name, gplok, warn, crc);
-		if (ks) {
-			if (owner)
-				*owner = mod;
-			return ks->value;
+		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
+			return true;
+	}
+	return false;
+}
+
+struct find_symbol_arg {
+	/* Input */
+	const char *name;
+	bool gplok;
+	bool warn;
+
+	/* Output */
+	struct module *owner;
+	const unsigned long *crc;
+	unsigned long value;
+};
+
+static bool find_symbol_in_section(const struct symsearch *syms,
+				   struct module *owner,
+				   unsigned int symnum, void *data)
+{
+	struct find_symbol_arg *fsa = data;
+
+	if (strcmp(syms->start[symnum].name, fsa->name) != 0)
+		return false;
+
+	if (!fsa->gplok) {
+		if (syms->licence == GPL_ONLY)
+			return false;
+		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
+			printk(KERN_WARNING "Symbol %s is being used "
+			       "by a non-GPL module, which will not "
+			       "be allowed in the future\n", fsa->name);
+			printk(KERN_WARNING "Please see the file "
+			       "Documentation/feature-removal-schedule.txt "
+			       "in the kernel source tree for more details.\n");
 		}
 	}
 
+#ifdef CONFIG_UNUSED_SYMBOLS
+	if (syms->unused && fsa->warn) {
+		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
+		       "however this module is using it.\n", fsa->name);
+		printk(KERN_WARNING
+		       "This symbol will go away in the future.\n");
+		printk(KERN_WARNING
+		       "Please evalute if this is the right api to use and if "
+		       "it really is, submit a report the linux kernel "
+		       "mailinglist together with submitting your code for "
+		       "inclusion.\n");
+	}
+#endif
+
+	fsa->owner = owner;
+	fsa->crc = symversion(syms->crcs, symnum);
+	fsa->value = syms->start[symnum].value;
+	return true;
+}
+
+/* Find a symbol, return value, (optional) crc and (optional) module
+ * which owns it */
+static unsigned long find_symbol(const char *name,
+				 struct module **owner,
+				 const unsigned long **crc,
+				 bool gplok,
+				 bool warn)
+{
+	struct find_symbol_arg fsa;
+
+	fsa.name = name;
+	fsa.gplok = gplok;
+	fsa.warn = warn;
+
+	if (each_symbol(find_symbol_in_section, &fsa)) {
+		if (owner)
+			*owner = fsa.owner;
+		if (crc)
+			*crc = fsa.crc;
+		return fsa.value;
+	}
+
 	DEBUGP("Failed to find symbol %s\n", name);
 	return -ENOENT;
 }
 
+/* lookup symbol in given range of kernel_symbols */
+static const struct kernel_symbol *lookup_symbol(const char *name,
+						 const struct kernel_symbol *start,
+						 const struct kernel_symbol *stop)
+{
+	const struct kernel_symbol *ks = start;
+	for (; ks < stop; ks++)
+		if (strcmp(ks->name, name) == 0)
+			return ks;
+	return NULL;
+}
+
 /* Search for module by name: must hold module_mutex. */
 static struct module *find_module(const char *name)
 {
@@ -639,8 +674,8 @@ static int __try_stop_module(void *_sref)
 {
 	struct stopref *sref = _sref;
 
-	/* If it's not unused, quit unless we are told to block. */
-	if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) {
+	/* If it's not unused, quit unless we're forcing. */
+	if (module_refcount(sref->mod) != 0) {
 		if (!(*sref->forced = try_force_unload(sref->flags)))
 			return -EWOULDBLOCK;
 	}
@@ -652,9 +687,16 @@ static int __try_stop_module(void *_sref)
 
 static int try_stop_module(struct module *mod, int flags, int *forced)
 {
-	struct stopref sref = { mod, flags, forced };
+	if (flags & O_NONBLOCK) {
+		struct stopref sref = { mod, flags, forced };
 
-	return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
+		return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
+	} else {
+		/* We don't need to stop the machine for this. */
+		mod->state = MODULE_STATE_GOING;
+		synchronize_sched();
+		return 0;
+	}
 }
 
 unsigned int module_refcount(struct module *mod)
@@ -1445,8 +1487,10 @@ static int verify_export_symbols(struct module *mod)
 		{ mod->syms, mod->num_syms },
 		{ mod->gpl_syms, mod->num_gpl_syms },
 		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
+#ifdef CONFIG_UNUSED_SYMBOLS
 		{ mod->unused_syms, mod->num_unused_syms },
 		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
+#endif
 	};
 
 	for (i = 0; i < ARRAY_SIZE(arr); i++) {
@@ -1526,7 +1570,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
 }
 
 /* Update size with this section: return offset. */
-static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
+static long get_offset(unsigned int *size, Elf_Shdr *sechdr)
 {
 	long ret;
 
@@ -1738,6 +1782,20 @@ static inline void add_kallsyms(struct module *mod,
 }
 #endif /* CONFIG_KALLSYMS */
 
+static void *module_alloc_update_bounds(unsigned long size)
+{
+	void *ret = module_alloc(size);
+
+	if (ret) {
+		/* Update module bounds. */
+		if ((unsigned long)ret < module_addr_min)
+			module_addr_min = (unsigned long)ret;
+		if ((unsigned long)ret + size > module_addr_max)
+			module_addr_max = (unsigned long)ret + size;
+	}
+	return ret;
+}
+
 /* Allocate and load the module: note that size of section 0 is always
    zero, and we rely on this for optional sections. */
 static struct module *load_module(void __user *umod,
@@ -1764,10 +1822,12 @@ static struct module *load_module(void __user *umod,
 	unsigned int gplfutureindex;
 	unsigned int gplfuturecrcindex;
 	unsigned int unwindex = 0;
+#ifdef CONFIG_UNUSED_SYMBOLS
 	unsigned int unusedindex;
 	unsigned int unusedcrcindex;
 	unsigned int unusedgplindex;
 	unsigned int unusedgplcrcindex;
+#endif
 	unsigned int markersindex;
 	unsigned int markersstringsindex;
 	struct module *mod;
@@ -1850,13 +1910,15 @@ static struct module *load_module(void __user *umod,
 	exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab");
 	gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl");
 	gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future");
-	unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
-	unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
 	crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab");
 	gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl");
 	gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future");
+#ifdef CONFIG_UNUSED_SYMBOLS
+	unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
+	unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
 	unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused");
 	unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl");
+#endif
 	setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
 	exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
 	obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
@@ -1935,7 +1997,7 @@ static struct module *load_module(void __user *umod,
 	layout_sections(mod, hdr, sechdrs, secstrings);
 
 	/* Do the allocs. */
-	ptr = module_alloc(mod->core_size);
+	ptr = module_alloc_update_bounds(mod->core_size);
 	if (!ptr) {
 		err = -ENOMEM;
 		goto free_percpu;
@@ -1943,7 +2005,7 @@ static struct module *load_module(void __user *umod,
 	memset(ptr, 0, mod->core_size);
 	mod->module_core = ptr;
 
-	ptr = module_alloc(mod->init_size);
+	ptr = module_alloc_update_bounds(mod->init_size);
 	if (!ptr && mod->init_size) {
 		err = -ENOMEM;
 		goto free_core;
@@ -2018,14 +2080,15 @@ static struct module *load_module(void __user *umod,
 	mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr;
 	mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size /
 					sizeof(*mod->gpl_future_syms);
-	mod->num_unused_syms = sechdrs[unusedindex].sh_size /
-					sizeof(*mod->unused_syms);
-	mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size /
-					sizeof(*mod->unused_gpl_syms);
 	mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr;
 	if (gplfuturecrcindex)
 		mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr;
 
+#ifdef CONFIG_UNUSED_SYMBOLS
+	mod->num_unused_syms = sechdrs[unusedindex].sh_size /
+					sizeof(*mod->unused_syms);
+	mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size /
+					sizeof(*mod->unused_gpl_syms);
 	mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr;
 	if (unusedcrcindex)
 		mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr;
@@ -2033,13 +2096,17 @@ static struct module *load_module(void __user *umod,
 	if (unusedgplcrcindex)
 		mod->unused_gpl_crcs
 			= (void *)sechdrs[unusedgplcrcindex].sh_addr;
+#endif
 
 #ifdef CONFIG_MODVERSIONS
-	if ((mod->num_syms && !crcindex) ||
-	    (mod->num_gpl_syms && !gplcrcindex) ||
-	    (mod->num_gpl_future_syms && !gplfuturecrcindex) ||
-	    (mod->num_unused_syms && !unusedcrcindex) ||
-	    (mod->num_unused_gpl_syms && !unusedgplcrcindex)) {
+	if ((mod->num_syms && !crcindex)
+	    || (mod->num_gpl_syms && !gplcrcindex)
+	    || (mod->num_gpl_future_syms && !gplfuturecrcindex)
+#ifdef CONFIG_UNUSED_SYMBOLS
+	    || (mod->num_unused_syms && !unusedcrcindex)
+	    || (mod->num_unused_gpl_syms && !unusedgplcrcindex)
+#endif
+	    ) {
 		printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name);
 		err = try_to_force_load(mod, "nocrc");
 		if (err)
@@ -2512,7 +2579,7 @@ static int m_show(struct seq_file *m, void *p)
 	struct module *mod = list_entry(p, struct module, list);
 	char buf[8];
 
-	seq_printf(m, "%s %lu",
+	seq_printf(m, "%s %u",
 		   mod->name, mod->init_size + mod->core_size);
 	print_unload_info(m, mod);
 
@@ -2595,6 +2662,9 @@ struct module *__module_text_address(unsigned long addr)
 {
 	struct module *mod;
 
+	if (addr < module_addr_min || addr > module_addr_max)
+		return NULL;
+
 	list_for_each_entry(mod, &modules, list)
 		if (within(addr, mod->module_init, mod->init_text_size)
 		    || within(addr, mod->module_core, mod->core_text_size))
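(Aside: the module.c rework above folds five per-table check callbacks into a single each_symbol() iterator plus one find_symbol_in_section() callback that carries its inputs and outputs in a find_symbol_arg struct. The same callback-with-argument-struct idiom in miniature, as a self-contained userspace sketch rather than kernel code; entry, lookup_arg, match and each_entry are hypothetical names:)

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct entry { const char *name; unsigned long value; };
struct lookup_arg { const char *name; unsigned long value; };

/* Callback: returns true on a match and fills in the output field. */
static bool match(const struct entry *tab, unsigned int i, void *data)
{
	struct lookup_arg *arg = data;

	if (strcmp(tab[i].name, arg->name) != 0)
		return false;
	arg->value = tab[i].value;
	return true;
}

/* Generic iterator: stops as soon as the callback returns true. */
static bool each_entry(const struct entry *tab, unsigned int n,
		       bool (*fn)(const struct entry *, unsigned int, void *),
		       void *data)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (fn(tab, i, data))
			return true;
	return false;
}

int main(void)
{
	struct entry tab[] = { { "foo", 1 }, { "bar", 2 } };
	struct lookup_arg arg = { .name = "bar" };

	if (each_entry(tab, 2, match, &arg))
		printf("%s = %lu\n", arg.name, arg.value);
	return 0;
}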
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index b45da40e8d25..59dfdf1e1d20 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -82,7 +82,7 @@ config PM_SLEEP_SMP
 
 config PM_SLEEP
 	bool
-	depends on SUSPEND || HIBERNATION
+	depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
 	default y
 
 config SUSPEND
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 16eeeaa9d618..6f8696c502f4 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
 	 */
 	cpus_and(cpumask, rcp->cpumask, cpu_online_map);
 	cpu_clear(rdp->cpu, cpumask);
-	for_each_cpu_mask(cpu, cpumask)
+	for_each_cpu_mask_nr(cpu, cpumask)
 		smp_send_reschedule(cpu);
 	}
 }
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 6f62b77d93c4..27827931ca0d 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb();  /*  ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 092e4c620af9..a56f629b057a 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -297,8 +297,8 @@ static int test_func(void *data)
  *
  * opcode:data
  */
-static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
-				  size_t count)
+static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr,
+				  const char *buf, size_t count)
 {
 	struct sched_param schedpar;
 	struct test_thread_data *td;
@@ -360,7 +360,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
  * @dev: thread to query
  * @buf: char buffer to be filled with thread status info
  */
-static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
+static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr,
+				 char *buf)
 {
 	struct test_thread_data *td;
 	struct task_struct *tsk;
diff --git a/kernel/sched.c b/kernel/sched.c
index 62b1b8ecb5c7..6acf749d3336 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2029,7 +2029,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 	/* Tally up the load of all CPUs in the group */
 	avg_load = 0;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
 			load = source_load(i, load_idx);
@@ -2071,7 +2071,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3089,7 +3089,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3368,7 +3368,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3910,7 +3910,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6721,7 +6721,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6732,7 +6732,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -6932,7 +6932,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -6957,7 +6957,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7196,7 +7196,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7263,7 +7263,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7280,7 +7280,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7347,7 +7347,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7393,21 +7393,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7427,7 +7427,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7510,7 +7510,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);
@@ -7641,11 +7641,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 }
 
 #ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
+static ssize_t sched_mc_power_savings_show(struct sys_device *dev,
+					   struct sysdev_attribute *attr, char *page)
 {
 	return sprintf(page, "%u\n", sched_mc_power_savings);
 }
 static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
+					    struct sysdev_attribute *attr,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
@@ -7655,11 +7657,13 @@ static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
 #endif
 
 #ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
+static ssize_t sched_smt_power_savings_show(struct sys_device *dev,
+					    struct sysdev_attribute *attr, char *page)
 {
 	return sprintf(page, "%u\n", sched_smt_power_savings);
 }
 static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
+					     struct sysdev_attribute *attr,
 					     const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7f700263f04c..cf2cd6ce4cb2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1033,7 +1033,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    && !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
 			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask(i, tmp) {
+			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 24621cea8bb0..f85a76363eee 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask(i, rd->span) {
+	for_each_cpu_mask_nr(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -1128,7 +1128,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index a272d78185eb..7bd8d1aadd5d 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -13,6 +13,7 @@
 #include <linux/delay.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/lockdep.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
 
@@ -25,7 +26,22 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
 
 static int __read_mostly did_panic;
-unsigned long __read_mostly softlockup_thresh = 60;
+int __read_mostly softlockup_thresh = 60;
+
+/*
+ * Should we panic (and reboot, if panic_timeout= is set) when a
+ * soft-lockup occurs:
+ */
+unsigned int __read_mostly softlockup_panic =
+				CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+
+static int __init softlockup_panic_setup(char *str)
+{
+	softlockup_panic = simple_strtoul(str, NULL, 0);
+
+	return 1;
+}
+__setup("softlockup_panic=", softlockup_panic_setup);
 
 static int
 softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -84,6 +100,14 @@ void softlockup_tick(void)
 	struct pt_regs *regs = get_irq_regs();
 	unsigned long now;
 
+	/* Is detection switched off? */
+	if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+		/* Be sure we don't false trigger if switched back on */
+		if (touch_timestamp)
+			per_cpu(touch_timestamp, this_cpu) = 0;
+		return;
+	}
+
 	if (touch_timestamp == 0) {
 		__touch_softlockup_watchdog();
 		return;
@@ -92,11 +116,8 @@ void softlockup_tick(void)
 	print_timestamp = per_cpu(print_timestamp, this_cpu);
 
 	/* report at most once a second */
-	if ((print_timestamp >= touch_timestamp &&
-	    print_timestamp < (touch_timestamp + 1)) ||
-	    did_panic || !per_cpu(watchdog_task, this_cpu)) {
+	if (print_timestamp == touch_timestamp || did_panic)
 		return;
-	}
 
 	/* do not print during early bootup: */
 	if (unlikely(system_state != SYSTEM_RUNNING)) {
@@ -106,8 +127,11 @@ void softlockup_tick(void)
 
 	now = get_timestamp(this_cpu);
 
-	/* Wake up the high-prio watchdog task every second: */
-	if (now > (touch_timestamp + 1))
+	/*
+	 * Wake up the high-prio watchdog task twice per
+	 * threshold timespan.
+	 */
+	if (now > touch_timestamp + softlockup_thresh/2)
 		wake_up_process(per_cpu(watchdog_task, this_cpu));
 
 	/* Warn about unreasonable delays: */
@@ -121,11 +145,15 @@ void softlockup_tick(void)
 			this_cpu, now - touch_timestamp,
 			current->comm, task_pid_nr(current));
 	print_modules();
+	print_irqtrace_events(current);
 	if (regs)
 		show_regs(regs);
 	else
 		dump_stack();
 	spin_unlock(&print_lock);
+
+	if (softlockup_panic)
+		panic("softlockup: hung tasks");
 }
 
 /*
@@ -178,6 +206,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now)
 
 	t->last_switch_timestamp = now;
 	touch_nmi_watchdog();
+
+	if (softlockup_panic)
+		panic("softlockup: blocked tasks");
 }
 
 /*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ba9b2054ecbd..738b411ff2d3 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -33,8 +33,9 @@ static int stopmachine(void *cpu)
 {
 	int irqs_disabled = 0;
 	int prepared = 0;
+	cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
+	set_cpus_allowed_ptr(current, cpumask);
 
 	/* Ack: we are alive */
 	smp_mb();	/* Theoretically the ack = 0 might not be on this CPU yet. */
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 5b9b467de070..0fea0ee12da9 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -59,6 +59,7 @@ cond_syscall(sys_epoll_create);
 cond_syscall(sys_epoll_ctl);
 cond_syscall(sys_epoll_wait);
 cond_syscall(sys_epoll_pwait);
+cond_syscall(compat_sys_epoll_pwait);
 cond_syscall(sys_semget);
 cond_syscall(sys_semop);
 cond_syscall(sys_semtimedop);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6b16e16428d8..2a7b9d88706b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -88,12 +88,13 @@ extern int rcutorture_runnable;
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
 
 /* Constants used for minimum and maximum */
-#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM)
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP)
 static int one = 1;
 #endif
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 static int sixty = 60;
+static int neg_one = -1;
 #endif
 
 #ifdef CONFIG_MMU
@@ -110,7 +111,7 @@ static int min_percpu_pagelist_fract = 8;
 
 static int ngroups_max = NGROUPS_MAX;
 
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
 extern char modprobe_path[];
 #endif
 #ifdef CONFIG_CHR_DEV_SG
@@ -475,7 +476,7 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &ftrace_enable_sysctl,
 	},
 #endif
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
 	{
 		.ctl_name	= KERN_MODPROBE,
 		.procname	= "modprobe",
@@ -739,13 +740,24 @@ static struct ctl_table kern_table[] = {
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 	{
 		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "softlockup_panic",
+		.data		= &softlockup_panic,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "softlockup_thresh",
 		.data		= &softlockup_thresh,
-		.maxlen		= sizeof(unsigned long),
+		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= &proc_doulongvec_minmax,
+		.proc_handler	= &proc_dointvec_minmax,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one,
+		.extra1		= &neg_one,
 		.extra2		= &sixty,
 	},
 	{
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4a23517169a6..06b17547f4e7 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 		return -EINVAL;
 
 	if (isadd == REGISTER) {
-		for_each_cpu_mask(cpu, mask) {
+		for_each_cpu_mask_nr(cpu, mask) {
 			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
 					 cpu_to_node(cpu));
 			if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 
 	/* Deregister or cleanup */
 cleanup:
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		listeners = &per_cpu(listener_array, cpu);
 		down_write(&listeners->sem);
 		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index dadde5361f32..093d4acf993b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
 		 * Cycle through CPUs to check if the CPUs stay
 		 * synchronized to each other.
 		 */
-		int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+		int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
 
-		if (next_cpu >= NR_CPUS)
+		if (next_cpu >= nr_cpu_ids)
 			next_cpu = first_cpu(cpu_online_map);
 		watchdog_timer.expires += WATCHDOG_INTERVAL;
 		add_timer_on(&watchdog_timer, next_cpu);
@@ -376,7 +376,8 @@ void clocksource_unregister(struct clocksource *cs)
  * Provides sysfs interface for listing current clocksource.
  */
 static ssize_t
-sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
+sysfs_show_current_clocksources(struct sys_device *dev,
+				struct sysdev_attribute *attr, char *buf)
 {
 	ssize_t count = 0;
 
@@ -397,6 +398,7 @@ sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
  * clocksource selction.
  */
 static ssize_t sysfs_override_clocksource(struct sys_device *dev,
+					  struct sysdev_attribute *attr,
 					  const char *buf, size_t count)
 {
 	struct clocksource *ovr = NULL;
@@ -449,7 +451,9 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
449 * Provides sysfs interface for listing registered clocksources 451 * Provides sysfs interface for listing registered clocksources
450 */ 452 */
451static ssize_t 453static ssize_t
452sysfs_show_available_clocksources(struct sys_device *dev, char *buf) 454sysfs_show_available_clocksources(struct sys_device *dev,
455 struct sysdev_attribute *attr,
456 char *buf)
453{ 457{
454 struct clocksource *src; 458 struct clocksource *src;
455 ssize_t count = 0; 459 ssize_t count = 0;
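The three clocksource sysfs callbacks pick up a struct sysdev_attribute * parameter, matching what appears to be a tree-wide change to the sysdev show/store prototypes so that a single handler can serve several attributes. A sketch of the post-change callback shapes, with illustrative names:

```c
/* Sketch of sysdev attribute callbacks under the new prototypes. The
 * extra attr argument is the point of the hunk; names are hypothetical. */
static ssize_t example_show(struct sys_device *dev,
			    struct sysdev_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "example\n");
}

static ssize_t example_store(struct sys_device *dev,
			     struct sysdev_attribute *attr,
			     const char *buf, size_t count)
{
	/* parse buf and apply the setting here */
	return count;
}

static SYSDEV_ATTR(example, 0644, example_show, example_store);
```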
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f48d0f09d32f..31463d370b94 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -399,8 +399,7 @@ again:
399 mask = CPU_MASK_NONE; 399 mask = CPU_MASK_NONE;
400 now = ktime_get(); 400 now = ktime_get();
401 /* Find all expired events */ 401 /* Find all expired events */
402 for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; 402 for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
403 cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
404 td = &per_cpu(tick_cpu_device, cpu); 403 td = &per_cpu(tick_cpu_device, cpu);
405 if (td->evtdev->next_event.tv64 <= now.tv64) 404 if (td->evtdev->next_event.tv64 <= now.tv64)
406 cpu_set(cpu, mask); 405 cpu_set(cpu, mask);
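tick-broadcast replaces an open-coded first_cpu()/next_cpu() walk with the same for_each_cpu_mask_nr() helper. Schematically (not the exact macro text), the new form is the old loop with the tighter bound:

```c
/* Schematic expansion only; the real macro differs in detail. */
for (cpu = first_cpu(tick_broadcast_oneshot_mask);
     cpu < nr_cpu_ids;				/* was: cpu != NR_CPUS */
     cpu = next_cpu_nr(cpu, tick_broadcast_oneshot_mask)) {
	/* per-cpu expired-event check, as in the hunk above */
}
```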
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 4f3886562b8c..bf43284d6855 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
135 */ 135 */
136static void tick_setup_device(struct tick_device *td, 136static void tick_setup_device(struct tick_device *td,
137 struct clock_event_device *newdev, int cpu, 137 struct clock_event_device *newdev, int cpu,
138 cpumask_t cpumask) 138 const cpumask_t *cpumask)
139{ 139{
140 ktime_t next_event; 140 ktime_t next_event;
141 void (*handler)(struct clock_event_device *) = NULL; 141 void (*handler)(struct clock_event_device *) = NULL;
@@ -169,8 +169,8 @@ static void tick_setup_device(struct tick_device *td,
169 * When the device is not per cpu, pin the interrupt to the 169 * When the device is not per cpu, pin the interrupt to the
170 * current cpu: 170 * current cpu:
171 */ 171 */
172 if (!cpus_equal(newdev->cpumask, cpumask)) 172 if (!cpus_equal(newdev->cpumask, *cpumask))
173 irq_set_affinity(newdev->irq, cpumask); 173 irq_set_affinity(newdev->irq, *cpumask);
174 174
175 /* 175 /*
176 * When global broadcasting is active, check if the current 176 * When global broadcasting is active, check if the current
@@ -196,20 +196,20 @@ static int tick_check_new_device(struct clock_event_device *newdev)
196 struct tick_device *td; 196 struct tick_device *td;
197 int cpu, ret = NOTIFY_OK; 197 int cpu, ret = NOTIFY_OK;
198 unsigned long flags; 198 unsigned long flags;
199 cpumask_t cpumask; 199 cpumask_of_cpu_ptr_declare(cpumask);
200 200
201 spin_lock_irqsave(&tick_device_lock, flags); 201 spin_lock_irqsave(&tick_device_lock, flags);
202 202
203 cpu = smp_processor_id(); 203 cpu = smp_processor_id();
204 cpumask_of_cpu_ptr_next(cpumask, cpu);
204 if (!cpu_isset(cpu, newdev->cpumask)) 205 if (!cpu_isset(cpu, newdev->cpumask))
205 goto out_bc; 206 goto out_bc;
206 207
207 td = &per_cpu(tick_cpu_device, cpu); 208 td = &per_cpu(tick_cpu_device, cpu);
208 curdev = td->evtdev; 209 curdev = td->evtdev;
209 cpumask = cpumask_of_cpu(cpu);
210 210
211 /* cpu local device ? */ 211 /* cpu local device ? */
212 if (!cpus_equal(newdev->cpumask, cpumask)) { 212 if (!cpus_equal(newdev->cpumask, *cpumask)) {
213 213
214 /* 214 /*
215 * If the cpu affinity of the device interrupt can not 215 * If the cpu affinity of the device interrupt can not
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
222 * If we have a cpu local device already, do not replace it 222 * If we have a cpu local device already, do not replace it
223 * by a non cpu local device 223 * by a non cpu local device
224 */ 224 */
225 if (curdev && cpus_equal(curdev->cpumask, cpumask)) 225 if (curdev && cpus_equal(curdev->cpumask, *cpumask))
226 goto out_bc; 226 goto out_bc;
227 } 227 }
228 228
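tick_setup_device() now takes its cpumask by const pointer, and tick_check_new_device() builds that pointer with the cpumask_of_cpu_ptr_declare()/_next() pair instead of copying cpumask_of_cpu(cpu) into a local. With NR_CPUS in the thousands a cpumask_t runs to hundreds of bytes, so avoiding the on-stack copy is the point. A condensed sketch of the pattern, with hypothetical function names:

```c
/* Sketch: pass a cpumask by reference. The callee dereferences at the
 * point of use; the caller never materializes a cpumask_t on its stack.
 * Function names here are illustrative, not from the patch. */
static void example_setup(const cpumask_t *cpumask)
{
	if (!cpus_equal(cpu_online_map, *cpumask))
		printk(KERN_DEBUG "device mask is not the online map\n");
}

static void example_check(void)
{
	cpumask_of_cpu_ptr_declare(mask);	/* declares: const cpumask_t *mask */
	int cpu = smp_processor_id();

	cpumask_of_cpu_ptr_next(mask, cpu);	/* point at this cpu's singleton mask */
	example_setup(mask);
}
```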
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index beef7ccdf842..942fc7c85283 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -140,8 +140,6 @@ void tick_nohz_update_jiffies(void)
140 if (!ts->tick_stopped) 140 if (!ts->tick_stopped)
141 return; 141 return;
142 142
143 touch_softlockup_watchdog();
144
145 cpu_clear(cpu, nohz_cpu_mask); 143 cpu_clear(cpu, nohz_cpu_mask);
146 now = ktime_get(); 144 now = ktime_get();
147 ts->idle_waketime = now; 145 ts->idle_waketime = now;
@@ -149,6 +147,8 @@ void tick_nohz_update_jiffies(void)
149 local_irq_save(flags); 147 local_irq_save(flags);
150 tick_do_update_jiffies64(now); 148 tick_do_update_jiffies64(now);
151 local_irq_restore(flags); 149 local_irq_restore(flags);
150
151 touch_softlockup_watchdog();
152} 152}
153 153
154void tick_nohz_stop_idle(int cpu) 154void tick_nohz_stop_idle(int cpu)
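In tick_nohz_update_jiffies() the softlockup-watchdog touch moves from before the jiffies catch-up to after it. One plausible reading, inferred from the ordering alone since the commit message is not part of this page, is that touching first leaves the watchdog's notion of "recently touched" stale once jiffies jump forward after a long idle, while touching last is consistent with the advanced jiffies:

```c
/* Resulting order, schematically, per the hunk above: */
local_irq_save(flags);
tick_do_update_jiffies64(now);	/* bring jiffies forward first */
local_irq_restore(flags);

touch_softlockup_watchdog();	/* then refresh the watchdog state */
```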
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 2301e1e7c606..63528086337c 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -213,7 +213,9 @@ static void start_stack_timers(void)
213 int cpu; 213 int cpu;
214 214
215 for_each_online_cpu(cpu) { 215 for_each_online_cpu(cpu) {
216 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); 216 cpumask_of_cpu_ptr(new_mask, cpu);
217
218 set_cpus_allowed_ptr(current, new_mask);
217 start_stack_timer(cpu); 219 start_stack_timer(cpu);
218 } 220 }
219 set_cpus_allowed_ptr(current, &saved_mask); 221 set_cpus_allowed_ptr(current, &saved_mask);
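start_stack_timers() pins the current task to each CPU in turn; the change swaps &cpumask_of_cpu(cpu), which takes the address of a temporary mask built on the stack, for cpumask_of_cpu_ptr(), which declares and assigns the pointer in one step. The surrounding idiom, condensed from the hunk:

```c
/* The pinning loop as it stands after the change. saved_mask is assumed
 * to be current->cpus_allowed captured before the loop, matching the
 * restore visible at the end of the hunk. */
cpumask_t saved_mask = current->cpus_allowed;
int cpu;

for_each_online_cpu(cpu) {
	cpumask_of_cpu_ptr(new_mask, cpu);	/* const cpumask_t *new_mask */

	set_cpus_allowed_ptr(current, new_mask);	/* migrate onto "cpu" */
	/* per-cpu setup runs here, guaranteed on that CPU */
}
set_cpus_allowed_ptr(current, &saved_mask);	/* restore original affinity */
```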
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce7799540c91..a6d36346d10a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
397 might_sleep(); 397 might_sleep();
398 lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); 398 lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
399 lock_release(&wq->lockdep_map, 1, _THIS_IP_); 399 lock_release(&wq->lockdep_map, 1, _THIS_IP_);
400 for_each_cpu_mask(cpu, *cpu_map) 400 for_each_cpu_mask_nr(cpu, *cpu_map)
401 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 401 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
402} 402}
403EXPORT_SYMBOL_GPL(flush_workqueue); 403EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
477 wq = cwq->wq; 477 wq = cwq->wq;
478 cpu_map = wq_cpu_map(wq); 478 cpu_map = wq_cpu_map(wq);
479 479
480 for_each_cpu_mask(cpu, *cpu_map) 480 for_each_cpu_mask_nr(cpu, *cpu_map)
481 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 481 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
482} 482}
483 483
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
813 list_del(&wq->list); 813 list_del(&wq->list);
814 spin_unlock(&workqueue_lock); 814 spin_unlock(&workqueue_lock);
815 815
816 for_each_cpu_mask(cpu, *cpu_map) 816 for_each_cpu_mask_nr(cpu, *cpu_map)
817 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); 817 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
818 put_online_cpus(); 818 put_online_cpus();
819 819
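The three workqueue call sites (flush, wait_on_work, destroy) convert the same way. Each walks wq_cpu_map(wq), which in this era of the code resolves to the bound CPU's singleton mask for single-threaded workqueues and to the populated-CPU map otherwise, and acts on the per-CPU sub-queue:

```c
/* The shared walk, as in the hunks above: visit each CPU the workqueue
 * runs on and operate on its per-CPU queue, bounded by nr_cpu_ids. */
const cpumask_t *cpu_map = wq_cpu_map(wq);
int cpu;

for_each_cpu_mask_nr(cpu, *cpu_map)
	flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
```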