author	Ingo Molnar <mingo@elte.hu>	2011-04-19 01:55:58 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-19 01:56:17 -0400
commit	68d2cf25d39324c54b5e42de7915c623a0917abe (patch)
tree	ff1291450d7e6630bc77ec1363c3db8d74fa58b0 /kernel
parent	176fcc5c5f0131504a55e1e1d35389c49a9177c2 (diff)
parent	5d2cd90922c778908bd0cd669e572a5b5eafd737 (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: we'll be queueing up dependent changes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit_tree.c	2
-rw-r--r--	kernel/auditsc.c	2
-rw-r--r--	kernel/cgroup.c	2
-rw-r--r--	kernel/cpu.c	2
-rw-r--r--	kernel/debug/debug_core.c	2
-rw-r--r--	kernel/debug/kdb/kdb_main.c	6
-rw-r--r--	kernel/debug/kdb/kdb_support.c	2
-rw-r--r--	kernel/exit.c	2
-rw-r--r--	kernel/irq/Kconfig	7
-rw-r--r--	kernel/irq/autoprobe.c	4
-rw-r--r--	kernel/irq/chip.c	133
-rw-r--r--	kernel/irq/compat.h	72
-rw-r--r--	kernel/irq/debug.h	2
-rw-r--r--	kernel/irq/dummychip.c	9
-rw-r--r--	kernel/irq/handle.c	3
-rw-r--r--	kernel/irq/internals.h	10
-rw-r--r--	kernel/irq/manage.c	13
-rw-r--r--	kernel/irq/migration.c	12
-rw-r--r--	kernel/irq/proc.c	8
-rw-r--r--	kernel/irq/resend.c	1
-rw-r--r--	kernel/irq/settings.h	55
-rw-r--r--	kernel/irq/spurious.c	1
-rw-r--r--	kernel/kexec.c	11
-rw-r--r--	kernel/kthread.c	2
-rw-r--r--	kernel/latencytop.c	2
-rw-r--r--	kernel/lockdep.c	4
-rw-r--r--	kernel/module.c	6
-rw-r--r--	kernel/mutex.c	2
-rw-r--r--	kernel/padata.c	8
-rw-r--r--	kernel/params.c	2
-rw-r--r--	kernel/perf_event.c	21
-rw-r--r--	kernel/posix-cpu-timers.c	2
-rw-r--r--	kernel/posix-timers.c	2
-rw-r--r--	kernel/power/main.c	2
-rw-r--r--	kernel/sched.c	20
-rw-r--r--	kernel/sched_autogroup.c	2
-rw-r--r--	kernel/sched_fair.c	19
-rw-r--r--	kernel/sched_rt.c	4
-rw-r--r--	kernel/signal.c	155
-rw-r--r--	kernel/softirq.c	2
-rw-r--r--	kernel/time/jiffies.c	2
-rw-r--r--	kernel/time/ntp.c	2
-rw-r--r--	kernel/time/timer_stats.c	2
-rw-r--r--	kernel/trace/ftrace.c	4
-rw-r--r--	kernel/trace/ring_buffer.c	4
-rw-r--r--	kernel/trace/trace.c	2
-rw-r--r--	kernel/trace/trace_clock.c	2
-rw-r--r--	kernel/trace/trace_entries.h	2
-rw-r--r--	kernel/trace/trace_functions_graph.c	2
-rw-r--r--	kernel/trace/trace_irqsoff.c	2
-rw-r--r--	kernel/trace/trace_kprobe.c	2
-rw-r--r--	kernel/user-return-notifier.c	2
-rw-r--r--	kernel/wait.c	2
-rw-r--r--	kernel/workqueue.c	2
54 files changed, 249 insertions, 399 deletions
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 37b2bea170c8..e99dda04b126 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -607,7 +607,7 @@ void audit_trim_trees(void)
 		spin_lock(&hash_lock);
 		list_for_each_entry(node, &tree->chunks, list) {
 			struct audit_chunk *chunk = find_chunk(node);
-			/* this could be NULL if the watch is dieing else where... */
+			/* this could be NULL if the watch is dying else where... */
 			struct inode *inode = chunk->mark.i.inode;
 			node->index |= 1U<<31;
 			if (iterate_mounts(compare_root, inode, root_mnt))
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index f49a0318c2ed..b33513a08beb 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1011,7 +1011,7 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
 /*
  * to_send and len_sent accounting are very loose estimates. We aren't
  * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
- * within about 500 bytes (next page boundry)
+ * within about 500 bytes (next page boundary)
  *
  * why snprintf? an int is up to 12 digits long. if we just assumed when
  * logging that a[%d]= was going to be 16 characters long we would be wasting
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e31b220a743d..25c7eb52de1a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -157,7 +157,7 @@ struct css_id {
 };
 
 /*
- * cgroup_event represents events which userspace want to recieve.
+ * cgroup_event represents events which userspace want to receive.
  */
 struct cgroup_event {
 	/*
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c95fc4df0faa..12b7458f23b1 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -126,7 +126,7 @@ static void cpu_hotplug_done(void)
 #else /* #if CONFIG_HOTPLUG_CPU */
 static void cpu_hotplug_begin(void) {}
 static void cpu_hotplug_done(void) {}
-#endif	/* #esle #if CONFIG_HOTPLUG_CPU */
+#endif	/* #else #if CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index cefd4a11f6d9..bad6786dee88 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -538,7 +538,7 @@ return_normal:
 
 	/*
 	 * For single stepping, try to only enter on the processor
-	 * that was single stepping. To gaurd against a deadlock, the
+	 * that was single stepping. To guard against a deadlock, the
 	 * kernel will only try for the value of sstep_tries before
 	 * giving up and continuing on.
 	 */
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 6bc6e3bc4f9c..be14779bcef6 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -441,9 +441,9 @@ static int kdb_check_regs(void)
  *	symbol name, and offset to the caller.
  *
  *	The argument may consist of a numeric value (decimal or
- *	hexidecimal), a symbol name, a register name (preceeded by the
+ *	hexidecimal), a symbol name, a register name (preceded by the
  *	percent sign), an environment variable with a numeric value
- *	(preceeded by a dollar sign) or a simple arithmetic expression
+ *	(preceded by a dollar sign) or a simple arithmetic expression
  *	consisting of a symbol name, +/-, and a numeric constant value
  *	(offset).
  * Parameters:
@@ -1335,7 +1335,7 @@ void kdb_print_state(const char *text, int value)
  *	error		The hardware-defined error code
  *	reason2		kdb's current reason code.
  *			Initially error but can change
- *			acording to kdb state.
+ *			according to kdb state.
  *	db_result	Result code from break or debug point.
  *	regs		The exception frame at time of fault/breakpoint.
  *			should always be valid.
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 6b2485dcb050..5532dd37aa86 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -545,7 +545,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
  *	Mask for process state.
  * Notes:
  *	The mask folds data from several sources into a single long value, so
- *	be carefull not to overlap the bits. TASK_* bits are in the LSB,
+ *	be careful not to overlap the bits. TASK_* bits are in the LSB,
  *	special cases like UNRUNNABLE are in the MSB. As of 2.6.10-rc1 there
  *	is no overlap between TASK_* and EXIT_* but that may not always be
  *	true, so EXIT_* bits are shifted left 16 bits before being stored in
diff --git a/kernel/exit.c b/kernel/exit.c
index 6a488ad2dce5..f5d2f63bae0b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -841,7 +841,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	/* Let father know we died
 	 *
 	 * Thread signals are configurable, but you aren't going to use
-	 * that to send signals to arbitary processes.
+	 * that to send signals to arbitrary processes.
 	 * That stops right now.
 	 *
 	 * If the parent exec id doesn't match the exec id we saved
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 72606ba10b14..c574f9a12c48 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -10,13 +10,6 @@ menu "IRQ subsystem"
 config GENERIC_HARDIRQS
 	def_bool y
 
-# Select this to disable the deprecated stuff
-config GENERIC_HARDIRQS_NO_DEPRECATED
-	bool
-
-config GENERIC_HARDIRQS_NO_COMPAT
-	bool
-
 # Options selectable by the architecture code
 
 # Make sparse irq Kconfig switch below available
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 394784c57060..342d8f44e401 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -70,10 +70,8 @@ unsigned long probe_irq_on(void)
 		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && irq_settings_can_probe(desc)) {
 			desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
-			if (irq_startup(desc)) {
-				irq_compat_set_pending(desc);
+			if (irq_startup(desc))
 				desc->istate |= IRQS_PENDING;
-			}
 		}
 		raw_spin_unlock_irq(&desc->lock);
 	}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 03099d521f5e..4af1e2b244cb 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -34,7 +34,6 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 	if (!chip)
 		chip = &no_irq_chip;
 
-	irq_chip_set_defaults(chip);
 	desc->irq_data.chip = chip;
 	irq_put_desc_unlock(desc, flags);
 	/*
@@ -141,25 +140,21 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data);
 static void irq_state_clr_disabled(struct irq_desc *desc)
 {
 	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
-	irq_compat_clr_disabled(desc);
 }
 
 static void irq_state_set_disabled(struct irq_desc *desc)
 {
 	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
-	irq_compat_set_disabled(desc);
 }
 
 static void irq_state_clr_masked(struct irq_desc *desc)
 {
 	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
-	irq_compat_clr_masked(desc);
 }
 
 static void irq_state_set_masked(struct irq_desc *desc)
 {
 	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
-	irq_compat_set_masked(desc);
 }
 
 int irq_startup(struct irq_desc *desc)
@@ -209,126 +204,6 @@ void irq_disable(struct irq_desc *desc)
 	}
 }
 
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-/* Temporary migration helpers */
-static void compat_irq_mask(struct irq_data *data)
-{
-	data->chip->mask(data->irq);
-}
-
-static void compat_irq_unmask(struct irq_data *data)
-{
-	data->chip->unmask(data->irq);
-}
-
-static void compat_irq_ack(struct irq_data *data)
-{
-	data->chip->ack(data->irq);
-}
-
-static void compat_irq_mask_ack(struct irq_data *data)
-{
-	data->chip->mask_ack(data->irq);
-}
-
-static void compat_irq_eoi(struct irq_data *data)
-{
-	data->chip->eoi(data->irq);
-}
-
-static void compat_irq_enable(struct irq_data *data)
-{
-	data->chip->enable(data->irq);
-}
-
-static void compat_irq_disable(struct irq_data *data)
-{
-	data->chip->disable(data->irq);
-}
-
-static void compat_irq_shutdown(struct irq_data *data)
-{
-	data->chip->shutdown(data->irq);
-}
-
-static unsigned int compat_irq_startup(struct irq_data *data)
-{
-	return data->chip->startup(data->irq);
-}
-
-static int compat_irq_set_affinity(struct irq_data *data,
-				   const struct cpumask *dest, bool force)
-{
-	return data->chip->set_affinity(data->irq, dest);
-}
-
-static int compat_irq_set_type(struct irq_data *data, unsigned int type)
-{
-	return data->chip->set_type(data->irq, type);
-}
-
-static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
-{
-	return data->chip->set_wake(data->irq, on);
-}
-
-static int compat_irq_retrigger(struct irq_data *data)
-{
-	return data->chip->retrigger(data->irq);
-}
-
-static void compat_bus_lock(struct irq_data *data)
-{
-	data->chip->bus_lock(data->irq);
-}
-
-static void compat_bus_sync_unlock(struct irq_data *data)
-{
-	data->chip->bus_sync_unlock(data->irq);
-}
-#endif
-
-/*
- * Fixup enable/disable function pointers
- */
-void irq_chip_set_defaults(struct irq_chip *chip)
-{
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-	if (chip->enable)
-		chip->irq_enable = compat_irq_enable;
-	if (chip->disable)
-		chip->irq_disable = compat_irq_disable;
-	if (chip->shutdown)
-		chip->irq_shutdown = compat_irq_shutdown;
-	if (chip->startup)
-		chip->irq_startup = compat_irq_startup;
-	if (!chip->end)
-		chip->end = dummy_irq_chip.end;
-	if (chip->bus_lock)
-		chip->irq_bus_lock = compat_bus_lock;
-	if (chip->bus_sync_unlock)
-		chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
-	if (chip->mask)
-		chip->irq_mask = compat_irq_mask;
-	if (chip->unmask)
-		chip->irq_unmask = compat_irq_unmask;
-	if (chip->ack)
-		chip->irq_ack = compat_irq_ack;
-	if (chip->mask_ack)
-		chip->irq_mask_ack = compat_irq_mask_ack;
-	if (chip->eoi)
-		chip->irq_eoi = compat_irq_eoi;
-	if (chip->set_affinity)
-		chip->irq_set_affinity = compat_irq_set_affinity;
-	if (chip->set_type)
-		chip->irq_set_type = compat_irq_set_type;
-	if (chip->set_wake)
-		chip->irq_set_wake = compat_irq_set_wake;
-	if (chip->retrigger)
-		chip->irq_retrigger = compat_irq_retrigger;
-#endif
-}
-
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
 	if (desc->irq_data.chip->irq_mask_ack)
@@ -381,7 +256,6 @@ void handle_nested_irq(unsigned int irq)
 	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
 		goto out_unlock;
 
-	irq_compat_set_progress(desc);
 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	raw_spin_unlock_irq(&desc->lock);
 
@@ -391,7 +265,6 @@ void handle_nested_irq(unsigned int irq)
 
 	raw_spin_lock_irq(&desc->lock);
 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
-	irq_compat_clr_progress(desc);
 
 out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
@@ -514,7 +387,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	 * then mask it and get out of here:
 	 */
 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
-		irq_compat_set_pending(desc);
 		desc->istate |= IRQS_PENDING;
 		mask_irq(desc);
 		goto out;
@@ -543,7 +415,7 @@ out:
  * @desc:	the interrupt description structure for this irq
  *
  *	Interrupt occures on the falling and/or rising edge of a hardware
- *	signal. The occurence is latched into the irq controller hardware
+ *	signal. The occurrence is latched into the irq controller hardware
  *	and must be acked in order to be reenabled. After the ack another
  *	interrupt can happen on the same source even before the first one
  *	is handled by the associated event handler. If this happens it
@@ -567,7 +439,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
 		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
 		if (!irq_check_poll(desc)) {
-			irq_compat_set_pending(desc);
 			desc->istate |= IRQS_PENDING;
 			mask_ack_irq(desc);
 			goto out_unlock;
@@ -643,7 +514,7 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
 	} while ((desc->istate & IRQS_PENDING) &&
 		 !irqd_irq_disabled(&desc->irq_data));
 
-out_unlock:
+out_eoi:
 	chip->irq_eoi(&desc->irq_data);
 	raw_spin_unlock(&desc->lock);
 }
diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h
deleted file mode 100644
index 6bbaf66aca85..000000000000
--- a/kernel/irq/compat.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Compat layer for transition period
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-static inline void irq_compat_set_progress(struct irq_desc *desc)
-{
-	desc->status |= IRQ_INPROGRESS;
-}
-
-static inline void irq_compat_clr_progress(struct irq_desc *desc)
-{
-	desc->status &= ~IRQ_INPROGRESS;
-}
-static inline void irq_compat_set_disabled(struct irq_desc *desc)
-{
-	desc->status |= IRQ_DISABLED;
-}
-static inline void irq_compat_clr_disabled(struct irq_desc *desc)
-{
-	desc->status &= ~IRQ_DISABLED;
-}
-static inline void irq_compat_set_pending(struct irq_desc *desc)
-{
-	desc->status |= IRQ_PENDING;
-}
-
-static inline void irq_compat_clr_pending(struct irq_desc *desc)
-{
-	desc->status &= ~IRQ_PENDING;
-}
-static inline void irq_compat_set_masked(struct irq_desc *desc)
-{
-	desc->status |= IRQ_MASKED;
-}
-
-static inline void irq_compat_clr_masked(struct irq_desc *desc)
-{
-	desc->status &= ~IRQ_MASKED;
-}
-static inline void irq_compat_set_move_pending(struct irq_desc *desc)
-{
-	desc->status |= IRQ_MOVE_PENDING;
-}
-
-static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
-{
-	desc->status &= ~IRQ_MOVE_PENDING;
-}
-static inline void irq_compat_set_affinity(struct irq_desc *desc)
-{
-	desc->status |= IRQ_AFFINITY_SET;
-}
-
-static inline void irq_compat_clr_affinity(struct irq_desc *desc)
-{
-	desc->status &= ~IRQ_AFFINITY_SET;
-}
-#else
-static inline void irq_compat_set_progress(struct irq_desc *desc) { }
-static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
-static inline void irq_compat_set_disabled(struct irq_desc *desc) { }
-static inline void irq_compat_clr_disabled(struct irq_desc *desc) { }
-static inline void irq_compat_set_pending(struct irq_desc *desc) { }
-static inline void irq_compat_clr_pending(struct irq_desc *desc) { }
-static inline void irq_compat_set_masked(struct irq_desc *desc) { }
-static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
-static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
-static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
-static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
-static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
-#endif
-
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index a0bd875ba3d5..306cba37e9a5 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -4,7 +4,7 @@
 
 #include <linux/kallsyms.h>
 
-#define P(f) if (desc->status & f) printk("%14s set\n", #f)
+#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
 #define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
 /* FIXME */
 #define PD(f) do { } while (0)
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
index 20dc5474947e..b5fcd96c7102 100644
--- a/kernel/irq/dummychip.c
+++ b/kernel/irq/dummychip.c
@@ -31,13 +31,6 @@ static unsigned int noop_ret(struct irq_data *data)
 	return 0;
 }
 
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-static void compat_noop(unsigned int irq) { }
-#define END_INIT .end = compat_noop
-#else
-#define END_INIT
-#endif
-
 /*
  * Generic no controller implementation
  */
@@ -48,7 +41,6 @@ struct irq_chip no_irq_chip = {
 	.irq_enable	= noop,
 	.irq_disable	= noop,
 	.irq_ack	= ack_bad,
-	END_INIT
 };
 
 /*
@@ -64,5 +56,4 @@ struct irq_chip dummy_irq_chip = {
 	.irq_ack	= noop,
 	.irq_mask	= noop,
 	.irq_unmask	= noop,
-	END_INIT
 };
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 1a2fb77f2fd6..90cb55f6d7eb 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -175,9 +175,7 @@ irqreturn_t handle_irq_event(struct irq_desc *desc)
 	struct irqaction *action = desc->action;
 	irqreturn_t ret;
 
-	irq_compat_clr_pending(desc);
 	desc->istate &= ~IRQS_PENDING;
-	irq_compat_set_progress(desc);
 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	raw_spin_unlock(&desc->lock);
 
@@ -185,6 +183,5 @@ irqreturn_t handle_irq_event(struct irq_desc *desc)
 
 	raw_spin_lock(&desc->lock);
 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
-	irq_compat_clr_progress(desc);
 	return ret;
 }
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 6b8b9713e28d..6546431447d7 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -15,10 +15,6 @@
 
 #define istate core_internal_state__do_not_mess_with_it
 
-#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-# define status status_use_accessors
-#endif
-
 extern int noirqdebug;
 
 /*
@@ -61,15 +57,11 @@ enum {
 	IRQS_SUSPENDED		= 0x00000800,
 };
 
-#include "compat.h"
 #include "debug.h"
 #include "settings.h"
 
 #define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
 
-/* Set default functions for irq_chip structures: */
-extern void irq_chip_set_defaults(struct irq_chip *chip);
-
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags);
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
@@ -156,13 +148,11 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
 static inline void irqd_set_move_pending(struct irq_data *d)
 {
 	d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
-	irq_compat_set_move_pending(irq_data_to_desc(d));
 }
 
 static inline void irqd_clr_move_pending(struct irq_data *d)
 {
 	d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
-	irq_compat_clr_move_pending(irq_data_to_desc(d));
 }
 
 static inline void irqd_clear(struct irq_data *d, unsigned int mask)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index acf540768b8f..07c1611f3899 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -132,7 +132,7 @@ irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
 }
 #else
 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
-static inline bool irq_move_pending(struct irq_desc *data) { return false; }
+static inline bool irq_move_pending(struct irq_data *data) { return false; }
 static inline void
 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 static inline void
@@ -166,7 +166,6 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		kref_get(&desc->affinity_notify->kref);
 		schedule_work(&desc->affinity_notify->work);
 	}
-	irq_compat_set_affinity(desc);
 	irqd_set(data, IRQD_AFFINITY_SET);
 
 	return ret;
@@ -297,10 +296,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 		if (cpumask_intersects(desc->irq_data.affinity,
 				       cpu_online_mask))
 			set = desc->irq_data.affinity;
-		else {
-			irq_compat_clr_affinity(desc);
+		else
 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
-		}
 	}
 
 	cpumask_and(mask, cpu_online_mask, set);
@@ -587,8 +584,6 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 			irqd_set(&desc->irq_data, IRQD_LEVEL);
 		}
 
-		if (chip != desc->irq_data.chip)
-			irq_chip_set_defaults(desc->irq_data.chip);
 		ret = 0;
 		break;
 	default:
@@ -785,7 +780,6 @@ static int irq_thread(void *data)
 			 * but AFAICT IRQS_PENDING should be fine as it
 			 * retriggers the interrupt itself --- tglx
 			 */
-			irq_compat_set_pending(desc);
 			desc->istate |= IRQS_PENDING;
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
@@ -981,8 +975,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		new->thread_mask = 1 << ffz(thread_mask);
 
 	if (!shared) {
-		irq_chip_set_defaults(desc->irq_data.chip);
-
 		init_waitqueue_head(&desc->wait_for_threads);
 
 		/* Setup the type (level, edge polarity) if configured: */
@@ -1059,6 +1051,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
+	free_cpumask_var(mask);
 
 	return 0;
 
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index e33d9c8d5089..47420908fba0 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -35,7 +35,7 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * do the disable, re-program, enable sequence.
 	 * This is *not* particularly important for level triggered
 	 * but in a edge trigger case, we might be setting rte
-	 * when an active trigger is comming in. This could
+	 * when an active trigger is coming in. This could
 	 * cause some ioapics to mal-function.
 	 * Being paranoid i guess!
 	 *
@@ -53,11 +53,6 @@ void irq_move_masked_irq(struct irq_data *idata)
 	cpumask_clear(desc->pending_mask);
 }
 
-void move_masked_irq(int irq)
-{
-	irq_move_masked_irq(irq_get_irq_data(irq));
-}
-
 void irq_move_irq(struct irq_data *idata)
 {
 	bool masked;
@@ -80,8 +75,3 @@ void irq_move_irq(struct irq_data *idata)
 	if (!masked)
 		idata->chip->irq_unmask(idata);
 }
-
-void move_native_irq(int irq)
-{
-	irq_move_irq(irq_get_irq_data(irq));
-}
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 626d092eed9a..dd201bd35103 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -364,6 +364,10 @@ int __weak arch_show_interrupts(struct seq_file *p, int prec)
 	return 0;
 }
 
+#ifndef ACTUAL_NR_IRQS
+# define ACTUAL_NR_IRQS nr_irqs
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	static int prec;
@@ -373,10 +377,10 @@ int show_interrupts(struct seq_file *p, void *v)
 	struct irqaction *action;
 	struct irq_desc *desc;
 
-	if (i > nr_irqs)
+	if (i > ACTUAL_NR_IRQS)
 		return 0;
 
-	if (i == nr_irqs)
+	if (i == ACTUAL_NR_IRQS)
 		return arch_show_interrupts(p, prec);
 
 	/* print header and calculate the width of the first column */
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index ad683a99b1ec..14dd5761e8c9 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -65,7 +65,6 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 	if (desc->istate & IRQS_REPLAY)
 		return;
 	if (desc->istate & IRQS_PENDING) {
-		irq_compat_clr_pending(desc);
 		desc->istate &= ~IRQS_PENDING;
 		desc->istate |= IRQS_REPLAY;
 
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 0227ad358272..0d91730b6330 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -15,17 +15,8 @@ enum {
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
-#define IRQ_INPROGRESS		GOT_YOU_MORON
-#define IRQ_REPLAY		GOT_YOU_MORON
-#define IRQ_WAITING		GOT_YOU_MORON
-#define IRQ_DISABLED		GOT_YOU_MORON
-#define IRQ_PENDING		GOT_YOU_MORON
-#define IRQ_MASKED		GOT_YOU_MORON
-#define IRQ_WAKEUP		GOT_YOU_MORON
-#define IRQ_MOVE_PENDING	GOT_YOU_MORON
 #define IRQ_PER_CPU		GOT_YOU_MORON
 #define IRQ_NO_BALANCING	GOT_YOU_MORON
-#define IRQ_AFFINITY_SET	GOT_YOU_MORON
 #define IRQ_LEVEL		GOT_YOU_MORON
 #define IRQ_NOPROBE		GOT_YOU_MORON
 #define IRQ_NOREQUEST		GOT_YOU_MORON
@@ -37,102 +28,98 @@ enum {
 static inline void
 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
 {
-	desc->status &= ~(clr & _IRQF_MODIFY_MASK);
-	desc->status |= (set & _IRQF_MODIFY_MASK);
+	desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
+	desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
 }
 
 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
 {
-	return desc->status & _IRQ_PER_CPU;
+	return desc->status_use_accessors & _IRQ_PER_CPU;
 }
 
 static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
 {
-	desc->status |= _IRQ_PER_CPU;
+	desc->status_use_accessors |= _IRQ_PER_CPU;
 }
 
 static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
 {
-	desc->status |= _IRQ_NO_BALANCING;
+	desc->status_use_accessors |= _IRQ_NO_BALANCING;
 }
 
 static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
 {
-	return desc->status & _IRQ_NO_BALANCING;
+	return desc->status_use_accessors & _IRQ_NO_BALANCING;
 }
 
 static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
 {
-	return desc->status & IRQ_TYPE_SENSE_MASK;
+	return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
 }
 
 static inline void
 irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
 {
-	desc->status &= ~IRQ_TYPE_SENSE_MASK;
-	desc->status |= mask & IRQ_TYPE_SENSE_MASK;
+	desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK;
+	desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
 }
 
 static inline bool irq_settings_is_level(struct irq_desc *desc)
 {
-	return desc->status & _IRQ_LEVEL;
+	return desc->status_use_accessors & _IRQ_LEVEL;
 }
 
 static inline void irq_settings_clr_level(struct irq_desc *desc)
 {
-	desc->status &= ~_IRQ_LEVEL;
+	desc->status_use_accessors &= ~_IRQ_LEVEL;
 }
 
 static inline void irq_settings_set_level(struct irq_desc *desc)
 {
-	desc->status |= _IRQ_LEVEL;
+	desc->status_use_accessors |= _IRQ_LEVEL;
 }
 
 static inline bool irq_settings_can_request(struct irq_desc *desc)
 {
-	return !(desc->status & _IRQ_NOREQUEST);
+	return !(desc->status_use_accessors & _IRQ_NOREQUEST);
 }
 
 static inline void irq_settings_clr_norequest(struct irq_desc *desc)
 {
-	desc->status &= ~_IRQ_NOREQUEST;
+	desc->status_use_accessors &= ~_IRQ_NOREQUEST;
 }
 
 static inline void irq_settings_set_norequest(struct irq_desc *desc)
 {
-	desc->status |= _IRQ_NOREQUEST;
+	desc->status_use_accessors |= _IRQ_NOREQUEST;
 }
 
 static inline bool irq_settings_can_probe(struct irq_desc *desc)
 {
-	return !(desc->status & _IRQ_NOPROBE);
+	return !(desc->status_use_accessors & _IRQ_NOPROBE);
 }
 
 static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
 {
-	desc->status &= ~_IRQ_NOPROBE;
+	desc->status_use_accessors &= ~_IRQ_NOPROBE;
 }
 
 static inline void irq_settings_set_noprobe(struct irq_desc *desc)
 {
-	desc->status |= _IRQ_NOPROBE;
+	desc->status_use_accessors |= _IRQ_NOPROBE;
 }
 
 static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
 {
-	return desc->status & _IRQ_MOVE_PCNTXT;
+	return desc->status_use_accessors & _IRQ_MOVE_PCNTXT;
 }
 
 static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
 {
-	return !(desc->status & _IRQ_NOAUTOEN);
+	return !(desc->status_use_accessors & _IRQ_NOAUTOEN);
 }
 
 static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
 {
-	return desc->status & _IRQ_NESTED_THREAD;
+	return desc->status_use_accessors & _IRQ_NESTED_THREAD;
 }
-
-/* Nothing should touch desc->status from now on */
-#undef status
-#define status USE_THE_PROPER_WRAPPERS_YOU_MORON
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 83f4799f46be..dfbd550401b2 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -93,7 +93,6 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 		 * Already running: If it is shared get the other
 		 * CPU to go looking for our mystery interrupt too
 		 */
-		irq_compat_set_pending(desc);
 		desc->istate |= IRQS_PENDING;
 		goto out;
 	}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ec19b92c7ebd..55936f9cb251 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -144,7 +144,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	/* Initialize the list of destination pages */
 	INIT_LIST_HEAD(&image->dest_pages);
 
-	/* Initialize the list of unuseable pages */
+	/* Initialize the list of unusable pages */
 	INIT_LIST_HEAD(&image->unuseable_pages);
 
 	/* Read in the segments */
@@ -454,7 +454,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 	/* Deal with the destination pages I have inadvertently allocated.
 	 *
 	 * Ideally I would convert multi-page allocations into single
-	 * page allocations, and add everyting to image->dest_pages.
+	 * page allocations, and add everything to image->dest_pages.
 	 *
 	 * For now it is simpler to just free the pages.
 	 */
@@ -602,7 +602,7 @@ static void kimage_free_extra_pages(struct kimage *image)
 	/* Walk through and free any extra destination pages I may have */
 	kimage_free_page_list(&image->dest_pages);
 
-	/* Walk through and free any unuseable pages I have cached */
+	/* Walk through and free any unusable pages I have cached */
 	kimage_free_page_list(&image->unuseable_pages);
 
 }
@@ -1099,7 +1099,8 @@ size_t crash_get_memory_size(void)
 	return size;
 }
 
-static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+void __weak crash_free_reserved_phys_range(unsigned long begin,
+					   unsigned long end)
 {
 	unsigned long addr;
 
@@ -1135,7 +1136,7 @@ int crash_shrink_memory(unsigned long new_size)
 	start = roundup(start, PAGE_SIZE);
 	end = roundup(start + new_size, PAGE_SIZE);
 
-	free_reserved_phys_range(end, crashk_res.end);
+	crash_free_reserved_phys_range(end, crashk_res.end);
 
 	if ((start == end) && (crashk_res.parent != NULL))
 		release_resource(&crashk_res);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 684ab3f7dd72..3b34d2732bce 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -139,7 +139,7 @@ static void create_kthread(struct kthread_create_info *create)
  * in @node, to get NUMA affinity for kthread stack, or else give -1.
  * When woken, the thread will run @threadfn() with @data as its
  * argument. @threadfn() can either call do_exit() directly if it is a
- * standalone thread for which noone will call kthread_stop(), or
+ * standalone thread for which no one will call kthread_stop(), or
  * return when 'kthread_should_stop()' is true (which means
  * kthread_stop() has been called). The return value should be zero
  * or a negative error number; it will be passed to kthread_stop().
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index ee74b35e528d..376066e10413 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -153,7 +153,7 @@ static inline void store_stacktrace(struct task_struct *tsk,
 }
 
 /**
- * __account_scheduler_latency - record an occured latency
+ * __account_scheduler_latency - record an occurred latency
  * @tsk - the task struct of the task hitting the latency
  * @usecs - the duration of the latency in microseconds
  * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 0d2058da80f5..53a68956f131 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2309,7 +2309,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	if (unlikely(curr->hardirqs_enabled)) {
 		/*
 		 * Neither irq nor preemption are disabled here
-		 * so this is racy by nature but loosing one hit
+		 * so this is racy by nature but losing one hit
 		 * in a stat is not a big deal.
 		 */
 		__debug_atomic_inc(redundant_hardirqs_on);
@@ -2620,7 +2620,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		if (!graph_lock())
 			return 0;
 		/*
-		 * Make sure we didnt race:
+		 * Make sure we didn't race:
 		 */
 		if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
 			graph_unlock();
diff --git a/kernel/module.c b/kernel/module.c
index 1f9f7bc56ca1..d5938a5c19c4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -809,7 +809,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		wait_for_zero_refcount(mod);
 
 	mutex_unlock(&module_mutex);
-	/* Final destruction now noone is using it. */
+	/* Final destruction now no one is using it. */
 	if (mod->exit != NULL)
 		mod->exit();
 	blocking_notifier_call_chain(&module_notify_list,
@@ -2777,7 +2777,7 @@ static struct module *load_module(void __user *umod,
 	mod->state = MODULE_STATE_COMING;
 
 	/* Now sew it into the lists so we can get lockdep and oops
-	 * info during argument parsing. Noone should access us, since
+	 * info during argument parsing. No one should access us, since
 	 * strong_try_module_get() will fail.
 	 * lockdep/oops can run asynchronous, so use the RCU list insertion
 	 * function to insert in a way safe to concurrent readers.
@@ -2971,7 +2971,7 @@ static const char *get_ksymbol(struct module *mod,
 	else
 		nextval = (unsigned long)mod->module_core+mod->core_text_size;
 
-	/* Scan for closest preceeding symbol, and next symbol. (ELF
+	/* Scan for closest preceding symbol, and next symbol. (ELF
 	   starts real symbols at 1). */
 	for (i = 1; i < mod->num_symtab; i++) {
 		if (mod->symtab[i].st_shndx == SHN_UNDEF)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index a5889fb28ecf..c4195fa98900 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -245,7 +245,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		}
 		__set_task_state(task, state);
 
-		/* didnt get the lock, go to sleep: */
+		/* didn't get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
 		preempt_enable_no_resched();
 		schedule();
diff --git a/kernel/padata.c b/kernel/padata.c
index 751019415d23..b91941df5e63 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -262,7 +262,7 @@ static void padata_reorder(struct parallel_data *pd)
 	/*
 	 * This cpu has to do the parallel processing of the next
 	 * object. It's waiting in the cpu's parallelization queue,
-	 * so exit imediately.
+	 * so exit immediately.
 	 */
 	if (PTR_ERR(padata) == -ENODATA) {
 		del_timer(&pd->timer);
@@ -284,7 +284,7 @@ static void padata_reorder(struct parallel_data *pd)
 	/*
 	 * The next object that needs serialization might have arrived to
 	 * the reorder queues in the meantime, we will be called again
-	 * from the timer function if noone else cares for it.
+	 * from the timer function if no one else cares for it.
 	 */
 	if (atomic_read(&pd->reorder_objects)
 			&& !(pinst->flags & PADATA_RESET))
@@ -515,7 +515,7 @@ static void __padata_stop(struct padata_instance *pinst)
 	put_online_cpus();
 }
 
-/* Replace the internal control stucture with a new one. */
+/* Replace the internal control structure with a new one. */
 static void padata_replace(struct padata_instance *pinst,
 			   struct parallel_data *pd_new)
 {
@@ -768,7 +768,7 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 }
 
 /**
- * padata_remove_cpu - remove a cpu from the one or both(serial and paralell)
+ * padata_remove_cpu - remove a cpu from the one or both(serial and parallel)
  *                     padata cpumasks.
  *
  * @pinst: padata instance
diff --git a/kernel/params.c b/kernel/params.c
index 0da1411222b9..7ab388a48a2e 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -95,7 +95,7 @@ static int parse_one(char *param,
 	/* Find parameter */
 	for (i = 0; i < num_params; i++) {
 		if (parameq(param, params[i].name)) {
-			/* Noone handled NULL, so do it here. */
+			/* No one handled NULL, so do it here. */
 			if (!val && params[i].ops->set != param_set_bool)
 				return -EINVAL;
 			DEBUGP("They are equal! Calling %p\n",
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c75925c4d1e2..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -145,8 +145,8 @@ static struct srcu_struct pmus_srcu;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-/* Minimum for 128 pages + 1 for the user control page */
-int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
+/* Minimum for 512 kiB + 1 user control page */
+int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
 
 /*
  * max perf event sample rate
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 		}
 
 		if (mode & PERF_CGROUP_SWIN) {
+			WARN_ON_ONCE(cpuctx->cgrp);
 			/* set cgrp before ctxsw in to
 			 * allow event_filter_match() to not
 			 * have to pass task around
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
 	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
 	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
@@ -6531,6 +6543,11 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_alloc;
 	}
 
+	if (task) {
+		put_task_struct(task);
+		task = NULL;
+	}
+
 	/*
 	 * Look up the group leader (we will attach this event to it):
 	 */
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 67fea9d25d55..0791b13df7bf 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1347,7 +1347,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 	/*
 	 * Now that all the timers on our list have the firing flag,
-	 * noone will touch their list entries but us. We'll take
+	 * no one will touch their list entries but us. We'll take
 	 * each timer's lock before clearing its firing flag, so no
 	 * timer call will interfere.
 	 */
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 4c0124919f9a..e5498d7405c3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -313,7 +313,7 @@ static void schedule_next_timer(struct k_itimer *timr)
  * restarted (i.e. we have flagged this in the sys_private entry of the
  * info block).
  *
- * To protect aginst the timer going away while the interrupt is queued,
+ * To protect against the timer going away while the interrupt is queued,
  * we require that the it_requeue_pending flag be set.
  */
 void do_schedule_next_timer(struct siginfo *info)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 8eaba5f27b10..de9aef8742f4 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -224,7 +224,7 @@ power_attr(state);
224 * writing to 'state'. It first should read from 'wakeup_count' and store 224 * writing to 'state'. It first should read from 'wakeup_count' and store
225 * the read value. Then, after carrying out its own preparations for the system 225 * the read value. Then, after carrying out its own preparations for the system
226 * transition to a sleep state, it should write the stored value to 226 * transition to a sleep state, it should write the stored value to
227 * 'wakeup_count'. If that fails, at least one wakeup event has occured since 227 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
228 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it 228 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
229 * is allowed to write to 'state', but the transition will be aborted if there 229 * is allowed to write to 'state', but the transition will be aborted if there
230 * are any wakeup events detected after 'wakeup_count' was written to. 230 * are any wakeup events detected after 'wakeup_count' was written to.
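
The comment touched in the hunk above describes the handshake a suspend initiator is expected to follow against /sys/power/wakeup_count. A rough userspace sketch of that protocol, assuming the usual sysfs paths and "mem" as the target state (error handling kept minimal; this is an illustration, not a reference implementation):

#include <stdio.h>

int main(void)
{
	FILE *f;
	unsigned long count;

	/* 1. Read and remember the current wakeup event count. */
	f = fopen("/sys/power/wakeup_count", "r");
	if (!f || fscanf(f, "%lu", &count) != 1)
		return 1;
	fclose(f);

	/* ... carry out our own preparations for the transition ... */

	/*
	 * 2. Write the stored value back. The kernel refuses the write if
	 *    at least one wakeup event occurred since the value was read;
	 *    in that case the suspend attempt must be aborted.
	 */
	f = fopen("/sys/power/wakeup_count", "w");
	if (!f || fprintf(f, "%lu", count) < 0 || fclose(f) != 0)
		return 1;	/* a wakeup event raced with us: abort */

	/* 3. Only now is it safe to request the transition. */
	f = fopen("/sys/power/state", "w");
	if (!f || fputs("mem", f) < 0 || fclose(f) != 0)
		return 1;

	return 0;
}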
diff --git a/kernel/sched.c b/kernel/sched.c
index f592ce6f8616..48013633d792 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2309,7 +2309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2309 * Cause a process which is running on another CPU to enter 2309 * Cause a process which is running on another CPU to enter
2310 * kernel-mode, without any delay. (to get signals handled.) 2310 * kernel-mode, without any delay. (to get signals handled.)
2311 * 2311 *
2312 * NOTE: this function doesnt have to take the runqueue lock, 2312 * NOTE: this function doesn't have to take the runqueue lock,
2313 * because all it wants to ensure is that the remote task enters 2313 * because all it wants to ensure is that the remote task enters
2314 * the kernel. If the IPI races and the task has been migrated 2314 * the kernel. If the IPI races and the task has been migrated
2315 * to another CPU then no harm is done and the purpose has been 2315 * to another CPU then no harm is done and the purpose has been
@@ -4997,7 +4997,7 @@ recheck:
4997 */ 4997 */
4998 raw_spin_lock_irqsave(&p->pi_lock, flags); 4998 raw_spin_lock_irqsave(&p->pi_lock, flags);
4999 /* 4999 /*
5000 * To be able to change p->policy safely, the apropriate 5000 * To be able to change p->policy safely, the appropriate
5001 * runqueue lock must be held. 5001 * runqueue lock must be held.
5002 */ 5002 */
5003 rq = __task_rq_lock(p); 5003 rq = __task_rq_lock(p);
@@ -5011,6 +5011,17 @@ recheck:
5011 return -EINVAL; 5011 return -EINVAL;
5012 } 5012 }
5013 5013
5014 /*
5015 * If not changing anything there's no need to proceed further:
5016 */
5017 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
5018 param->sched_priority == p->rt_priority))) {
5019
5020 __task_rq_unlock(rq);
5021 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5022 return 0;
5023 }
5024
5014#ifdef CONFIG_RT_GROUP_SCHED 5025#ifdef CONFIG_RT_GROUP_SCHED
5015 if (user) { 5026 if (user) {
5016 /* 5027 /*
@@ -5705,7 +5716,7 @@ void show_state_filter(unsigned long state_filter)
5705 do_each_thread(g, p) { 5716 do_each_thread(g, p) {
5706 /* 5717 /*
5707 * reset the NMI-timeout, listing all files on a slow 5718 * reset the NMI-timeout, listing all files on a slow
5708 * console might take alot of time: 5719 * console might take a lot of time:
5709 */ 5720 */
5710 touch_nmi_watchdog(); 5721 touch_nmi_watchdog();
5711 if (!state_filter || (p->state & state_filter)) 5722 if (!state_filter || (p->state & state_filter))
@@ -6320,6 +6331,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6320 break; 6331 break;
6321#endif 6332#endif
6322 } 6333 }
6334
6335 update_max_interval();
6336
6323 return NOTIFY_OK; 6337 return NOTIFY_OK;
6324} 6338}
6325 6339
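
The __sched_setscheduler() hunk above adds a short-circuit: when the requested policy and (for RT policies) priority already match the task's current settings, the runqueue is unlocked and 0 is returned without doing the dequeue/requeue work. A tiny standalone sketch of that "nothing changed, return early" check; types, constants and field names are simplified stand-ins rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct task {
	int policy;		/* e.g. SCHED_NORMAL, SCHED_FIFO, ... */
	int rt_priority;	/* only meaningful for RT policies */
};

static bool rt_policy(int policy)
{
	return policy == 1 || policy == 2;	/* stand-ins for SCHED_FIFO/SCHED_RR */
}

/* Returns immediately when the request would not change anything. */
static int set_scheduler(struct task *p, int policy, int prio)
{
	if (policy == p->policy &&
	    (!rt_policy(policy) || prio == p->rt_priority))
		return 0;	/* no change: skip the expensive path */

	/* ... otherwise take the runqueue lock, dequeue, change, requeue ... */
	p->policy = policy;
	p->rt_priority = prio;
	return 0;
}

int main(void)
{
	struct task t = { .policy = 0, .rt_priority = 0 };

	printf("%d\n", set_scheduler(&t, 0, 0));	/* early return, nothing to do */
	printf("%d\n", set_scheduler(&t, 1, 10));	/* real change */
	return 0;
}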
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 5946ac515602..429242f3c484 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -179,7 +179,7 @@ void sched_autogroup_create_attach(struct task_struct *p)
179 struct autogroup *ag = autogroup_create(); 179 struct autogroup *ag = autogroup_create();
180 180
181 autogroup_move_group(p, ag); 181 autogroup_move_group(p, ag);
182 /* drop extra refrence added by autogroup_create() */ 182 /* drop extra reference added by autogroup_create() */
183 autogroup_kref_put(ag); 183 autogroup_kref_put(ag);
184} 184}
185EXPORT_SYMBOL(sched_autogroup_create_attach); 185EXPORT_SYMBOL(sched_autogroup_create_attach);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3f7ec9e27ee1..7f00772e57c9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/latencytop.h> 23#include <linux/latencytop.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/cpumask.h>
25 26
26/* 27/*
27 * Targeted preemption latency for CPU-bound tasks: 28 * Targeted preemption latency for CPU-bound tasks:
@@ -3061,7 +3062,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3061 3062
3062 /* 3063 /*
3063 * if *imbalance is less than the average load per runnable task 3064 * if *imbalance is less than the average load per runnable task
3064 * there is no gaurantee that any tasks will be moved so we'll have 3065 * there is no guarantee that any tasks will be moved so we'll have
3065 * a think about bumping its value to force at least one task to be 3066 * a think about bumping its value to force at least one task to be
3066 * moved 3067 * moved
3067 */ 3068 */
@@ -3819,6 +3820,17 @@ void select_nohz_load_balancer(int stop_tick)
3819 3820
3820static DEFINE_SPINLOCK(balancing); 3821static DEFINE_SPINLOCK(balancing);
3821 3822
3823static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3824
3825/*
3826 * Scale the max load_balance interval with the number of CPUs in the system.
3827 * This trades load-balance latency on larger machines for less cross talk.
3828 */
3829static void update_max_interval(void)
3830{
3831 max_load_balance_interval = HZ*num_online_cpus()/10;
3832}
3833
3822/* 3834/*
3823 * It checks each scheduling domain to see if it is due to be balanced, 3835 * It checks each scheduling domain to see if it is due to be balanced,
3824 * and initiates a balancing operation if so. 3836 * and initiates a balancing operation if so.
@@ -3848,10 +3860,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3848 3860
3849 /* scale ms to jiffies */ 3861 /* scale ms to jiffies */
3850 interval = msecs_to_jiffies(interval); 3862 interval = msecs_to_jiffies(interval);
3851 if (unlikely(!interval)) 3863 interval = clamp(interval, 1UL, max_load_balance_interval);
3852 interval = 1;
3853 if (interval > HZ*NR_CPUS/10)
3854 interval = HZ*NR_CPUS/10;
3855 3864
3856 need_serialize = sd->flags & SD_SERIALIZE; 3865 need_serialize = sd->flags & SD_SERIALIZE;
3857 3866
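
In the rebalance_domains() hunk above, the two hand-rolled bounds checks on interval are folded into a single clamp() against the new max_load_balance_interval, which migration_call() now recomputes via update_max_interval() as CPUs come and go, so the bound tracks online CPUs instead of NR_CPUS. A short standalone check that the clamp form matches the removed open-coded version when the upper bound is held at the old constant (plain C helper, not the kernel's clamp() macro):

#include <assert.h>
#include <stdio.h>

#define HZ	1000UL
#define NR_CPUS	64UL

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* The removed open-coded bounding of the balance interval. */
static unsigned long old_bound(unsigned long interval)
{
	if (!interval)
		interval = 1;
	if (interval > HZ * NR_CPUS / 10)
		interval = HZ * NR_CPUS / 10;
	return interval;
}

int main(void)
{
	unsigned long max = HZ * NR_CPUS / 10;
	unsigned long v;

	for (v = 0; v < 2 * max; v++)
		assert(old_bound(v) == clamp_ul(v, 1UL, max));

	printf("clamp form equivalent for 0..%lu\n", 2 * max - 1);
	return 0;
}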
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index db308cb08b75..e7cebdc65f82 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1378,7 +1378,7 @@ retry:
1378 task = pick_next_pushable_task(rq); 1378 task = pick_next_pushable_task(rq);
1379 if (task_cpu(next_task) == rq->cpu && task == next_task) { 1379 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1380 /* 1380 /*
1381 * If we get here, the task hasnt moved at all, but 1381 * If we get here, the task hasn't moved at all, but
1382 * it has failed to push. We will not try again, 1382 * it has failed to push. We will not try again,
1383 * since the other cpus will pull from us when they 1383 * since the other cpus will pull from us when they
1384 * are ready. 1384 * are ready.
@@ -1488,7 +1488,7 @@ static int pull_rt_task(struct rq *this_rq)
1488 /* 1488 /*
1489 * We continue with the search, just in 1489 * We continue with the search, just in
1490 * case there's an even higher prio task 1490 * case there's an even higher prio task
1491 * in another runqueue. (low likelyhood 1491 * in another runqueue. (low likelihood
1492 * but possible) 1492 * but possible)
1493 */ 1493 */
1494 } 1494 }
diff --git a/kernel/signal.c b/kernel/signal.c
index 1186cf7fac77..29e233fd7a0f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -226,7 +226,7 @@ static inline void print_dropped_signal(int sig)
226/* 226/*
227 * allocate a new signal queue record 227 * allocate a new signal queue record
228 * - this may be called without locks if and only if t == current, otherwise an 228 * - this may be called without locks if and only if t == current, otherwise an
229 * appopriate lock must be held to stop the target task from exiting 229 * appropriate lock must be held to stop the target task from exiting
230 */ 230 */
231static struct sigqueue * 231static struct sigqueue *
232__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) 232__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
@@ -375,15 +375,15 @@ int unhandled_signal(struct task_struct *tsk, int sig)
375 return !tracehook_consider_fatal_signal(tsk, sig); 375 return !tracehook_consider_fatal_signal(tsk, sig);
376} 376}
377 377
378 378/*
379/* Notify the system that a driver wants to block all signals for this 379 * Notify the system that a driver wants to block all signals for this
380 * process, and wants to be notified if any signals at all were to be 380 * process, and wants to be notified if any signals at all were to be
381 * sent/acted upon. If the notifier routine returns non-zero, then the 381 * sent/acted upon. If the notifier routine returns non-zero, then the
382 * signal will be acted upon after all. If the notifier routine returns 0, 382 * signal will be acted upon after all. If the notifier routine returns 0,
383 * then then signal will be blocked. Only one block per process is 383 * then then signal will be blocked. Only one block per process is
384 * allowed. priv is a pointer to private data that the notifier routine 384 * allowed. priv is a pointer to private data that the notifier routine
385 * can use to determine if the signal should be blocked or not. */ 385 * can use to determine if the signal should be blocked or not.
386 386 */
387void 387void
388block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) 388block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
389{ 389{
@@ -434,9 +434,10 @@ still_pending:
434 copy_siginfo(info, &first->info); 434 copy_siginfo(info, &first->info);
435 __sigqueue_free(first); 435 __sigqueue_free(first);
436 } else { 436 } else {
437 /* Ok, it wasn't in the queue. This must be 437 /*
438 a fast-pathed signal or we must have been 438 * Ok, it wasn't in the queue. This must be
439 out of queue space. So zero out the info. 439 * a fast-pathed signal or we must have been
440 * out of queue space. So zero out the info.
440 */ 441 */
441 info->si_signo = sig; 442 info->si_signo = sig;
442 info->si_errno = 0; 443 info->si_errno = 0;
@@ -468,7 +469,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
468} 469}
469 470
470/* 471/*
471 * Dequeue a signal and return the element to the caller, which is 472 * Dequeue a signal and return the element to the caller, which is
472 * expected to free it. 473 * expected to free it.
473 * 474 *
474 * All callers have to hold the siglock. 475 * All callers have to hold the siglock.
@@ -490,7 +491,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
490 * itimers are process shared and we restart periodic 491 * itimers are process shared and we restart periodic
491 * itimers in the signal delivery path to prevent DoS 492 * itimers in the signal delivery path to prevent DoS
492 * attacks in the high resolution timer case. This is 493 * attacks in the high resolution timer case. This is
493 * compliant with the old way of self restarting 494 * compliant with the old way of self-restarting
494 * itimers, as the SIGALRM is a legacy signal and only 495 * itimers, as the SIGALRM is a legacy signal and only
495 * queued once. Changing the restart behaviour to 496 * queued once. Changing the restart behaviour to
496 * restart the timer in the signal dequeue path is 497 * restart the timer in the signal dequeue path is
@@ -923,14 +924,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
923 if (info == SEND_SIG_FORCED) 924 if (info == SEND_SIG_FORCED)
924 goto out_set; 925 goto out_set;
925 926
926 /* Real-time signals must be queued if sent by sigqueue, or 927 /*
927 some other real-time mechanism. It is implementation 928 * Real-time signals must be queued if sent by sigqueue, or
928 defined whether kill() does so. We attempt to do so, on 929 * some other real-time mechanism. It is implementation
929 the principle of least surprise, but since kill is not 930 * defined whether kill() does so. We attempt to do so, on
930 allowed to fail with EAGAIN when low on memory we just 931 * the principle of least surprise, but since kill is not
931 make sure at least one signal gets delivered and don't 932 * allowed to fail with EAGAIN when low on memory we just
932 pass on the info struct. */ 933 * make sure at least one signal gets delivered and don't
933 934 * pass on the info struct.
935 */
934 if (sig < SIGRTMIN) 936 if (sig < SIGRTMIN)
935 override_rlimit = (is_si_special(info) || info->si_code >= 0); 937 override_rlimit = (is_si_special(info) || info->si_code >= 0);
936 else 938 else
@@ -1201,8 +1203,7 @@ retry:
1201 return error; 1203 return error;
1202} 1204}
1203 1205
1204int 1206int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1205kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1206{ 1207{
1207 int error; 1208 int error;
1208 rcu_read_lock(); 1209 rcu_read_lock();
@@ -1299,8 +1300,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1299 * These are for backward compatibility with the rest of the kernel source. 1300 * These are for backward compatibility with the rest of the kernel source.
1300 */ 1301 */
1301 1302
1302int 1303int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1303send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1304{ 1304{
1305 /* 1305 /*
1306 * Make sure legacy kernel users don't send in bad values 1306 * Make sure legacy kernel users don't send in bad values
@@ -1368,7 +1368,7 @@ EXPORT_SYMBOL(kill_pid);
1368 * These functions support sending signals using preallocated sigqueue 1368 * These functions support sending signals using preallocated sigqueue
1369 * structures. This is needed "because realtime applications cannot 1369 * structures. This is needed "because realtime applications cannot
1370 * afford to lose notifications of asynchronous events, like timer 1370 * afford to lose notifications of asynchronous events, like timer
1371 * expirations or I/O completions". In the case of Posix Timers 1371 * expirations or I/O completions". In the case of POSIX Timers
1372 * we allocate the sigqueue structure from the timer_create. If this 1372 * we allocate the sigqueue structure from the timer_create. If this
1373 * allocation fails we are able to report the failure to the application 1373 * allocation fails we are able to report the failure to the application
1374 * with an EAGAIN error. 1374 * with an EAGAIN error.
@@ -1553,7 +1553,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1553 info.si_signo = SIGCHLD; 1553 info.si_signo = SIGCHLD;
1554 info.si_errno = 0; 1554 info.si_errno = 0;
1555 /* 1555 /*
1556 * see comment in do_notify_parent() abot the following 3 lines 1556 * see comment in do_notify_parent() about the following 4 lines
1557 */ 1557 */
1558 rcu_read_lock(); 1558 rcu_read_lock();
1559 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); 1559 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
@@ -1611,7 +1611,7 @@ static inline int may_ptrace_stop(void)
1611} 1611}
1612 1612
1613/* 1613/*
1614 * Return nonzero if there is a SIGKILL that should be waking us up. 1614 * Return non-zero if there is a SIGKILL that should be waking us up.
1615 * Called with the siglock held. 1615 * Called with the siglock held.
1616 */ 1616 */
1617static int sigkill_pending(struct task_struct *tsk) 1617static int sigkill_pending(struct task_struct *tsk)
@@ -1735,7 +1735,7 @@ void ptrace_notify(int exit_code)
1735/* 1735/*
1736 * This performs the stopping for SIGSTOP and other stop signals. 1736 * This performs the stopping for SIGSTOP and other stop signals.
1737 * We have to stop all threads in the thread group. 1737 * We have to stop all threads in the thread group.
1738 * Returns nonzero if we've actually stopped and released the siglock. 1738 * Returns non-zero if we've actually stopped and released the siglock.
1739 * Returns zero if we didn't stop and still hold the siglock. 1739 * Returns zero if we didn't stop and still hold the siglock.
1740 */ 1740 */
1741static int do_signal_stop(int signr) 1741static int do_signal_stop(int signr)
@@ -1823,10 +1823,12 @@ static int ptrace_signal(int signr, siginfo_t *info,
1823 1823
1824 current->exit_code = 0; 1824 current->exit_code = 0;
1825 1825
1826 /* Update the siginfo structure if the signal has 1826 /*
1827 changed. If the debugger wanted something 1827 * Update the siginfo structure if the signal has
1828 specific in the siginfo structure then it should 1828 * changed. If the debugger wanted something
1829 have updated *info via PTRACE_SETSIGINFO. */ 1829 * specific in the siginfo structure then it should
1830 * have updated *info via PTRACE_SETSIGINFO.
1831 */
1830 if (signr != info->si_signo) { 1832 if (signr != info->si_signo) {
1831 info->si_signo = signr; 1833 info->si_signo = signr;
1832 info->si_errno = 0; 1834 info->si_errno = 0;
@@ -1885,7 +1887,7 @@ relock:
1885 for (;;) { 1887 for (;;) {
1886 struct k_sigaction *ka; 1888 struct k_sigaction *ka;
1887 /* 1889 /*
1888 * Tracing can induce an artifical signal and choose sigaction. 1890 * Tracing can induce an artificial signal and choose sigaction.
1889 * The return value in @signr determines the default action, 1891 * The return value in @signr determines the default action,
1890 * but @info->si_signo is the signal number we will report. 1892 * but @info->si_signo is the signal number we will report.
1891 */ 1893 */
@@ -2034,7 +2036,8 @@ void exit_signals(struct task_struct *tsk)
2034 if (!signal_pending(tsk)) 2036 if (!signal_pending(tsk))
2035 goto out; 2037 goto out;
2036 2038
2037 /* It could be that __group_complete_signal() choose us to 2039 /*
2040 * It could be that __group_complete_signal() choose us to
2038 * notify about group-wide signal. Another thread should be 2041 * notify about group-wide signal. Another thread should be
2039 * woken now to take the signal since we will not. 2042 * woken now to take the signal since we will not.
2040 */ 2043 */
@@ -2072,6 +2075,9 @@ EXPORT_SYMBOL(unblock_all_signals);
2072 * System call entry points. 2075 * System call entry points.
2073 */ 2076 */
2074 2077
2078/**
2079 * sys_restart_syscall - restart a system call
2080 */
2075SYSCALL_DEFINE0(restart_syscall) 2081SYSCALL_DEFINE0(restart_syscall)
2076{ 2082{
2077 struct restart_block *restart = &current_thread_info()->restart_block; 2083 struct restart_block *restart = &current_thread_info()->restart_block;
@@ -2125,6 +2131,13 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2125 return error; 2131 return error;
2126} 2132}
2127 2133
2134/**
2135 * sys_rt_sigprocmask - change the list of currently blocked signals
2136 * @how: whether to add, remove, or set signals
2137 * @set: stores pending signals
2138 * @oset: previous value of signal mask if non-null
2139 * @sigsetsize: size of sigset_t type
2140 */
2128SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, 2141SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2129 sigset_t __user *, oset, size_t, sigsetsize) 2142 sigset_t __user *, oset, size_t, sigsetsize)
2130{ 2143{
@@ -2183,8 +2196,14 @@ long do_sigpending(void __user *set, unsigned long sigsetsize)
2183 2196
2184out: 2197out:
2185 return error; 2198 return error;
2186} 2199}
2187 2200
2201/**
2202 * sys_rt_sigpending - examine a pending signal that has been raised
2203 * while blocked
2204 * @set: stores pending signals
2205 * @sigsetsize: size of sigset_t type or larger
2206 */
2188SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) 2207SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2189{ 2208{
2190 return do_sigpending(set, sigsetsize); 2209 return do_sigpending(set, sigsetsize);
@@ -2233,9 +2252,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2233 err |= __put_user(from->si_trapno, &to->si_trapno); 2252 err |= __put_user(from->si_trapno, &to->si_trapno);
2234#endif 2253#endif
2235#ifdef BUS_MCEERR_AO 2254#ifdef BUS_MCEERR_AO
2236 /* 2255 /*
2237 * Other callers might not initialize the si_lsb field, 2256 * Other callers might not initialize the si_lsb field,
2238 * so check explicitely for the right codes here. 2257 * so check explicitly for the right codes here.
2239 */ 2258 */
2240 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 2259 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2241 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 2260 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
@@ -2264,6 +2283,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2264 2283
2265#endif 2284#endif
2266 2285
2286/**
2287 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2288 * in @uthese
2289 * @uthese: queued signals to wait for
2290 * @uinfo: if non-null, the signal's siginfo is returned here
2291 * @uts: upper bound on process time suspension
2292 * @sigsetsize: size of sigset_t type
2293 */
2267SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, 2294SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2268 siginfo_t __user *, uinfo, const struct timespec __user *, uts, 2295 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2269 size_t, sigsetsize) 2296 size_t, sigsetsize)
@@ -2280,7 +2307,7 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2280 2307
2281 if (copy_from_user(&these, uthese, sizeof(these))) 2308 if (copy_from_user(&these, uthese, sizeof(these)))
2282 return -EFAULT; 2309 return -EFAULT;
2283 2310
2284 /* 2311 /*
2285 * Invert the set of allowed signals to get those we 2312 * Invert the set of allowed signals to get those we
2286 * want to block. 2313 * want to block.
@@ -2305,9 +2332,11 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2305 + (ts.tv_sec || ts.tv_nsec)); 2332 + (ts.tv_sec || ts.tv_nsec));
2306 2333
2307 if (timeout) { 2334 if (timeout) {
2308 /* None ready -- temporarily unblock those we're 2335 /*
2336 * None ready -- temporarily unblock those we're
2309 * interested while we are sleeping in so that we'll 2337 * interested while we are sleeping in so that we'll
2310 * be awakened when they arrive. */ 2338 * be awakened when they arrive.
2339 */
2311 current->real_blocked = current->blocked; 2340 current->real_blocked = current->blocked;
2312 sigandsets(&current->blocked, &current->blocked, &these); 2341 sigandsets(&current->blocked, &current->blocked, &these);
2313 recalc_sigpending(); 2342 recalc_sigpending();
@@ -2339,6 +2368,11 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2339 return ret; 2368 return ret;
2340} 2369}
2341 2370
2371/**
2372 * sys_kill - send a signal to a process
2373 * @pid: the PID of the process
2374 * @sig: signal to be sent
2375 */
2342SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) 2376SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2343{ 2377{
2344 struct siginfo info; 2378 struct siginfo info;
@@ -2414,7 +2448,11 @@ SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2414 return do_tkill(tgid, pid, sig); 2448 return do_tkill(tgid, pid, sig);
2415} 2449}
2416 2450
2417/* 2451/**
2452 * sys_tkill - send signal to one specific task
2453 * @pid: the PID of the task
2454 * @sig: signal to be sent
2455 *
2418 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2456 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2419 */ 2457 */
2420SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) 2458SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
@@ -2426,6 +2464,12 @@ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2426 return do_tkill(0, pid, sig); 2464 return do_tkill(0, pid, sig);
2427} 2465}
2428 2466
2467/**
2468 * sys_rt_sigqueueinfo - send signal information to a signal
2469 * @pid: the PID of the thread
2470 * @sig: signal to be sent
2471 * @uinfo: signal info to be sent
2472 */
2429SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, 2473SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2430 siginfo_t __user *, uinfo) 2474 siginfo_t __user *, uinfo)
2431{ 2475{
@@ -2553,12 +2597,11 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
2553 2597
2554 error = -EINVAL; 2598 error = -EINVAL;
2555 /* 2599 /*
2556 * 2600 * Note - this code used to test ss_flags incorrectly:
2557 * Note - this code used to test ss_flags incorrectly
2558 * old code may have been written using ss_flags==0 2601 * old code may have been written using ss_flags==0
2559 * to mean ss_flags==SS_ONSTACK (as this was the only 2602 * to mean ss_flags==SS_ONSTACK (as this was the only
2560 * way that worked) - this fix preserves that older 2603 * way that worked) - this fix preserves that older
2561 * mechanism 2604 * mechanism.
2562 */ 2605 */
2563 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) 2606 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2564 goto out; 2607 goto out;
@@ -2592,6 +2635,10 @@ out:
2592 2635
2593#ifdef __ARCH_WANT_SYS_SIGPENDING 2636#ifdef __ARCH_WANT_SYS_SIGPENDING
2594 2637
2638/**
2639 * sys_sigpending - examine pending signals
2640 * @set: where mask of pending signal is returned
2641 */
2595SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) 2642SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2596{ 2643{
2597 return do_sigpending(set, sizeof(*set)); 2644 return do_sigpending(set, sizeof(*set));
@@ -2600,8 +2647,15 @@ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2600#endif 2647#endif
2601 2648
2602#ifdef __ARCH_WANT_SYS_SIGPROCMASK 2649#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2603/* Some platforms have their own version with special arguments others 2650/**
2604 support only sys_rt_sigprocmask. */ 2651 * sys_sigprocmask - examine and change blocked signals
2652 * @how: whether to add, remove, or set signals
2653 * @set: signals to add or remove (if non-null)
2654 * @oset: previous value of signal mask if non-null
2655 *
2656 * Some platforms have their own version with special arguments;
2657 * others support only sys_rt_sigprocmask.
2658 */
2605 2659
2606SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, 2660SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2607 old_sigset_t __user *, oset) 2661 old_sigset_t __user *, oset)
@@ -2654,6 +2708,13 @@ out:
2654#endif /* __ARCH_WANT_SYS_SIGPROCMASK */ 2708#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2655 2709
2656#ifdef __ARCH_WANT_SYS_RT_SIGACTION 2710#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2711/**
2712 * sys_rt_sigaction - alter an action taken by a process
2713 * @sig: signal to be sent
2714 * @act: the thread group ID of the thread
2715 * @oact: the PID of the thread
2716 * @sigsetsize: size of sigset_t type
2717 */
2657SYSCALL_DEFINE4(rt_sigaction, int, sig, 2718SYSCALL_DEFINE4(rt_sigaction, int, sig,
2658 const struct sigaction __user *, act, 2719 const struct sigaction __user *, act,
2659 struct sigaction __user *, oact, 2720 struct sigaction __user *, oact,
@@ -2740,6 +2801,12 @@ SYSCALL_DEFINE0(pause)
2740#endif 2801#endif
2741 2802
2742#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND 2803#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2804/**
2805 * sys_rt_sigsuspend - replace the signal mask for a value with the
2806 * @unewset value until a signal is received
2807 * @unewset: new signal mask value
2808 * @sigsetsize: size of sigset_t type
2809 */
2743SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) 2810SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2744{ 2811{
2745 sigset_t newset; 2812 sigset_t newset;
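
Several of the signal.c hunks above only add kernel-doc headers for the rt_sig* system calls. For readers coming from userspace, a small example of the corresponding libc wrappers may help connect those entry points to actual use: sigprocmask() and sigtimedwait() below end up in sys_rt_sigprocmask and sys_rt_sigtimedwait; this is an illustrative sketch, not part of the patch:

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };

	/* Block SIGUSR1 so it stays pending instead of being delivered. */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	if (sigprocmask(SIG_BLOCK, &set, NULL) < 0) {
		perror("sigprocmask");
		return 1;
	}

	printf("waiting up to 5s for SIGUSR1 (pid %d)...\n", (int)getpid());

	/* Synchronously wait for the blocked signal, with a timeout. */
	if (sigtimedwait(&set, &info, &timeout) < 0) {
		perror("sigtimedwait");	/* EAGAIN on timeout */
		return 1;
	}

	printf("got signal %d from pid %d\n", info.si_signo, (int)info.si_pid);
	return 0;
}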
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 735d87095172..174f976c2874 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -567,7 +567,7 @@ static void __tasklet_hrtimer_trampoline(unsigned long data)
567/** 567/**
568 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks 568 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
569 * @ttimer: tasklet_hrtimer which is initialized 569 * @ttimer: tasklet_hrtimer which is initialized
570 * @function: hrtimer callback funtion which gets called from softirq context 570 * @function: hrtimer callback function which gets called from softirq context
571 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME) 571 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
572 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL) 572 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
573 */ 573 */
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index b2fa506667c0..a470154e0408 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -34,7 +34,7 @@
34 * inaccuracies caused by missed or lost timer 34 * inaccuracies caused by missed or lost timer
35 * interrupts and the inability for the timer 35 * interrupts and the inability for the timer
36 * interrupt hardware to accuratly tick at the 36 * interrupt hardware to accuratly tick at the
37 * requested HZ value. It is also not reccomended 37 * requested HZ value. It is also not recommended
38 * for "tick-less" systems. 38 * for "tick-less" systems.
39 */ 39 */
40#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) 40#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5f1bb8e2008f..f6117a4c7cb8 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -652,6 +652,8 @@ int do_adjtimex(struct timex *txc)
652 struct timespec delta; 652 struct timespec delta;
653 delta.tv_sec = txc->time.tv_sec; 653 delta.tv_sec = txc->time.tv_sec;
654 delta.tv_nsec = txc->time.tv_usec; 654 delta.tv_nsec = txc->time.tv_usec;
655 if (!capable(CAP_SYS_TIME))
656 return -EPERM;
655 if (!(txc->modes & ADJ_NANO)) 657 if (!(txc->modes & ADJ_NANO))
656 delta.tv_nsec *= 1000; 658 delta.tv_nsec *= 1000;
657 result = timekeeping_inject_offset(&delta); 659 result = timekeeping_inject_offset(&delta);
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 2f3b585b8d7d..a5d0a3a85dd8 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -236,7 +236,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
236 unsigned int timer_flag) 236 unsigned int timer_flag)
237{ 237{
238 /* 238 /*
239 * It doesnt matter which lock we take: 239 * It doesn't matter which lock we take:
240 */ 240 */
241 raw_spinlock_t *lock; 241 raw_spinlock_t *lock;
242 struct entry *entry, input; 242 struct entry *entry, input;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c075f4ea6b94..ee24fa1935ac 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1268,7 +1268,7 @@ static int ftrace_update_code(struct module *mod)
1268 p->flags = 0L; 1268 p->flags = 0L;
1269 1269
1270 /* 1270 /*
1271 * Do the initial record convertion from mcount jump 1271 * Do the initial record conversion from mcount jump
1272 * to the NOP instructions. 1272 * to the NOP instructions.
1273 */ 1273 */
1274 if (!ftrace_code_disable(mod, p)) { 1274 if (!ftrace_code_disable(mod, p)) {
@@ -3425,7 +3425,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3425 atomic_set(&t->tracing_graph_pause, 0); 3425 atomic_set(&t->tracing_graph_pause, 0);
3426 atomic_set(&t->trace_overrun, 0); 3426 atomic_set(&t->trace_overrun, 0);
3427 t->ftrace_timestamp = 0; 3427 t->ftrace_timestamp = 0;
3428 /* make curr_ret_stack visable before we add the ret_stack */ 3428 /* make curr_ret_stack visible before we add the ret_stack */
3429 smp_wmb(); 3429 smp_wmb();
3430 t->ret_stack = ret_stack; 3430 t->ret_stack = ret_stack;
3431} 3431}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d9c8bcafb120..0ef7b4b2a1f7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1478,7 +1478,7 @@ static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1478 return local_read(&bpage->entries) & RB_WRITE_MASK; 1478 return local_read(&bpage->entries) & RB_WRITE_MASK;
1479} 1479}
1480 1480
1481/* Size is determined by what has been commited */ 1481/* Size is determined by what has been committed */
1482static inline unsigned rb_page_size(struct buffer_page *bpage) 1482static inline unsigned rb_page_size(struct buffer_page *bpage)
1483{ 1483{
1484 return rb_page_commit(bpage); 1484 return rb_page_commit(bpage);
@@ -2932,7 +2932,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2932 /* 2932 /*
2933 * cpu_buffer->pages just needs to point to the buffer, it 2933 * cpu_buffer->pages just needs to point to the buffer, it
2934 * has no specific buffer page to point to. Lets move it out 2934 * has no specific buffer page to point to. Lets move it out
2935 * of our way so we don't accidently swap it. 2935 * of our way so we don't accidentally swap it.
2936 */ 2936 */
2937 cpu_buffer->pages = reader->list.prev; 2937 cpu_buffer->pages = reader->list.prev;
2938 2938
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9541c27c1cf2..d38c16a06a6f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3239,7 +3239,7 @@ waitagain:
3239 trace_seq_init(&iter->seq); 3239 trace_seq_init(&iter->seq);
3240 3240
3241 /* 3241 /*
3242 * If there was nothing to send to user, inspite of consuming trace 3242 * If there was nothing to send to user, in spite of consuming trace
3243 * entries, go back to wait for more entries. 3243 * entries, go back to wait for more entries.
3244 */ 3244 */
3245 if (sret == -EBUSY) 3245 if (sret == -EBUSY)
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 685a67d55db0..6302747a1398 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -46,7 +46,7 @@ u64 notrace trace_clock_local(void)
46} 46}
47 47
48/* 48/*
49 * trace_clock(): 'inbetween' trace clock. Not completely serialized, 49 * trace_clock(): 'between' trace clock. Not completely serialized,
50 * but not completely incorrect when crossing CPUs either. 50 * but not completely incorrect when crossing CPUs either.
51 * 51 *
52 * This is based on cpu_clock(), which will allow at most ~1 jiffy of 52 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 1516cb3ec549..e32744c84d94 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -27,7 +27,7 @@
27 * in the structure. 27 * in the structure.
28 * 28 *
29 * * for structures within structures, the format of the internal 29 * * for structures within structures, the format of the internal
30 * structure is layed out. This allows the internal structure 30 * structure is laid out. This allows the internal structure
31 * to be deciphered for the format file. Although these macros 31 * to be deciphered for the format file. Although these macros
32 * may become out of sync with the internal structure, they 32 * may become out of sync with the internal structure, they
33 * will create a compile error if it happens. Since the 33 * will create a compile error if it happens. Since the
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 76b05980225c..962cdb24ed81 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -905,7 +905,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
905 * 905 *
906 * returns 1 if 906 * returns 1 if
907 * - we are inside irq code 907 * - we are inside irq code
908 * - we just extered irq code 908 * - we just entered irq code
909 * 909 *
910 * retunns 0 if 910 * retunns 0 if
911 * - funcgraph-interrupts option is set 911 * - funcgraph-interrupts option is set
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 92b6e1e12d98..a4969b47afc1 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -80,7 +80,7 @@ static struct tracer_flags tracer_flags = {
80 * skip the latency if the sequence has changed - some other section 80 * skip the latency if the sequence has changed - some other section
81 * did a maximum and could disturb our measurement with serial console 81 * did a maximum and could disturb our measurement with serial console
82 * printouts, etc. Truly coinciding maximum latencies should be rare 82 * printouts, etc. Truly coinciding maximum latencies should be rare
83 * and what happens together happens separately as well, so this doesnt 83 * and what happens together happens separately as well, so this doesn't
84 * decrease the validity of the maximum found: 84 * decrease the validity of the maximum found:
85 */ 85 */
86static __cacheline_aligned_in_smp unsigned long max_sequence; 86static __cacheline_aligned_in_smp unsigned long max_sequence;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8435b43b1782..35d55a386145 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1839,7 +1839,7 @@ static void unregister_probe_event(struct trace_probe *tp)
1839 kfree(tp->call.print_fmt); 1839 kfree(tp->call.print_fmt);
1840} 1840}
1841 1841
1842/* Make a debugfs interface for controling probe points */ 1842/* Make a debugfs interface for controlling probe points */
1843static __init int init_kprobe_trace(void) 1843static __init int init_kprobe_trace(void)
1844{ 1844{
1845 struct dentry *d_tracer; 1845 struct dentry *d_tracer;
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index eb27fd3430a2..92cb706c7fc8 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register);
20 20
21/* 21/*
22 * Removes a registered user return notifier. Must be called from atomic 22 * Removes a registered user return notifier. Must be called from atomic
23 * context, and from the same cpu registration occured in. 23 * context, and from the same cpu registration occurred in.
24 */ 24 */
25void user_return_notifier_unregister(struct user_return_notifier *urn) 25void user_return_notifier_unregister(struct user_return_notifier *urn)
26{ 26{
diff --git a/kernel/wait.c b/kernel/wait.c
index b0310eb6cc1e..f45ea8d2a1ce 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -142,7 +142,7 @@ EXPORT_SYMBOL(finish_wait);
142 * woken up through the queue. 142 * woken up through the queue.
143 * 143 *
144 * This prevents waiter starvation where an exclusive waiter 144 * This prevents waiter starvation where an exclusive waiter
145 * aborts and is woken up concurrently and noone wakes up 145 * aborts and is woken up concurrently and no one wakes up
146 * the next waiter. 146 * the next waiter.
147 */ 147 */
148void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, 148void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 04ef830690ec..8859a41806dd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1291,7 +1291,7 @@ __acquires(&gcwq->lock)
1291 return true; 1291 return true;
1292 spin_unlock_irq(&gcwq->lock); 1292 spin_unlock_irq(&gcwq->lock);
1293 1293
1294 /* CPU has come up inbetween, retry migration */ 1294 /* CPU has come up in between, retry migration */
1295 cpu_relax(); 1295 cpu_relax();
1296 } 1296 }
1297} 1297}