diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-04-01 14:22:57 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-04-01 14:22:57 -0400 |
commit | 683b6c6f82a60fabf47012581c2cfbf1b037ab95 (patch) | |
tree | 6a3fdf26b98aebf4b0b9eca8d242ba89e0565d8b /kernel | |
parent | 1ead65812486cda65093683a99b8907a7242fa93 (diff) | |
parent | 1b422ecd27866985b9f35d0d2b5ae6e9122dd4c0 (diff) |
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq code updates from Thomas Gleixner:
"The irq department proudly presents:
- Another tree wide sweep of irq infrastructure abuse. Clear winner
of the trainwreck engineering contest was:
#include "../../../kernel/irq/settings.h"
- Tree wide update of irq_set_affinity() callbacks which miss a cpu
online check when picking a single cpu out of the affinity mask.
- Tree wide consolidation of interrupt statistics.
- Updates to the threaded interrupt infrastructure to allow explicit
wakeup of the interrupt thread and a variant of synchronize_irq()
which synchronizes only the hard interrupt handler. Both are
needed to replace the homebrew thread handling in the mmc/sdhci
code.
- New irq chip callbacks to allow proper support for GPIO based irqs.
The GPIO based interrupts need to request/release GPIO resources
from request/free_irq.
- A few new ARM interrupt chips. No revolutionary new hardware, just
differently wreckaged variations of the scheme.
- Small improvements, cleanups and updates all over the place"
I was hoping that that trainwreck engineering contest was an April Fools'
joke. But no.
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (68 commits)
irqchip: sun7i/sun6i: Disable NMI before registering the handler
ARM: sun7i/sun6i: dts: Fix IRQ number for sun6i NMI controller
ARM: sun7i/sun6i: irqchip: Update the documentation
ARM: sun7i/sun6i: dts: Add NMI irqchip support
ARM: sun7i/sun6i: irqchip: Add irqchip driver for NMI controller
genirq: Export symbol no_action()
arm: omap: Fix typo in ams-delta-fiq.c
m68k: atari: Fix the last kernel_stat.h fallout
irqchip: sun4i: Simplify sun4i_irq_ack
irqchip: sun4i: Use handle_fasteoi_irq for all interrupts
genirq: procfs: Make smp_affinity values go+r
softirq: Add linux/irq.h to make it compile again
m68k: amiga: Add linux/irq.h to make it compile again
irqchip: sun4i: Don't ack IRQs > 0, fix acking of IRQ 0
irqchip: sun4i: Fix a comment about mask register initialization
irqchip: sun4i: Fix irq 0 not working
genirq: Add a new IRQCHIP_EOI_THREADED flag
genirq: Document IRQCHIP_ONESHOT_SAFE flag
ARM: sunxi: dt: Convert to the new irq controller compatibles
irqchip: sunxi: Change compatibles
...
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/irq/chip.c | 48 | ||||
-rw-r--r-- | kernel/irq/handle.c | 5 | ||||
-rw-r--r-- | kernel/irq/internals.h | 9 | ||||
-rw-r--r-- | kernel/irq/irqdesc.c | 5 | ||||
-rw-r--r-- | kernel/irq/manage.c | 129 | ||||
-rw-r--r-- | kernel/irq/proc.c | 8 | ||||
-rw-r--r-- | kernel/softirq.c | 1 |
7 files changed, 168 insertions, 37 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index dc04c166c54d..6397df2d6945 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -281,6 +281,19 @@ void unmask_irq(struct irq_desc *desc) | |||
281 | } | 281 | } |
282 | } | 282 | } |
283 | 283 | ||
284 | void unmask_threaded_irq(struct irq_desc *desc) | ||
285 | { | ||
286 | struct irq_chip *chip = desc->irq_data.chip; | ||
287 | |||
288 | if (chip->flags & IRQCHIP_EOI_THREADED) | ||
289 | chip->irq_eoi(&desc->irq_data); | ||
290 | |||
291 | if (chip->irq_unmask) { | ||
292 | chip->irq_unmask(&desc->irq_data); | ||
293 | irq_state_clr_masked(desc); | ||
294 | } | ||
295 | } | ||
296 | |||
284 | /* | 297 | /* |
285 | * handle_nested_irq - Handle a nested irq from a irq thread | 298 | * handle_nested_irq - Handle a nested irq from a irq thread |
286 | * @irq: the interrupt number | 299 | * @irq: the interrupt number |
@@ -435,6 +448,27 @@ static inline void preflow_handler(struct irq_desc *desc) | |||
435 | static inline void preflow_handler(struct irq_desc *desc) { } | 448 | static inline void preflow_handler(struct irq_desc *desc) { } |
436 | #endif | 449 | #endif |
437 | 450 | ||
451 | static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) | ||
452 | { | ||
453 | if (!(desc->istate & IRQS_ONESHOT)) { | ||
454 | chip->irq_eoi(&desc->irq_data); | ||
455 | return; | ||
456 | } | ||
457 | /* | ||
458 | * We need to unmask in the following cases: | ||
459 | * - Oneshot irq which did not wake the thread (caused by a | ||
460 | * spurious interrupt or a primary handler handling it | ||
461 | * completely). | ||
462 | */ | ||
463 | if (!irqd_irq_disabled(&desc->irq_data) && | ||
464 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { | ||
465 | chip->irq_eoi(&desc->irq_data); | ||
466 | unmask_irq(desc); | ||
467 | } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { | ||
468 | chip->irq_eoi(&desc->irq_data); | ||
469 | } | ||
470 | } | ||
471 | |||
438 | /** | 472 | /** |
439 | * handle_fasteoi_irq - irq handler for transparent controllers | 473 | * handle_fasteoi_irq - irq handler for transparent controllers |
440 | * @irq: the interrupt number | 474 | * @irq: the interrupt number |
@@ -448,6 +482,8 @@ static inline void preflow_handler(struct irq_desc *desc) { } | |||
448 | void | 482 | void |
449 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | 483 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) |
450 | { | 484 | { |
485 | struct irq_chip *chip = desc->irq_data.chip; | ||
486 | |||
451 | raw_spin_lock(&desc->lock); | 487 | raw_spin_lock(&desc->lock); |
452 | 488 | ||
453 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 489 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
@@ -473,18 +509,14 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
473 | preflow_handler(desc); | 509 | preflow_handler(desc); |
474 | handle_irq_event(desc); | 510 | handle_irq_event(desc); |
475 | 511 | ||
476 | if (desc->istate & IRQS_ONESHOT) | 512 | cond_unmask_eoi_irq(desc, chip); |
477 | cond_unmask_irq(desc); | ||
478 | 513 | ||
479 | out_eoi: | ||
480 | desc->irq_data.chip->irq_eoi(&desc->irq_data); | ||
481 | out_unlock: | ||
482 | raw_spin_unlock(&desc->lock); | 514 | raw_spin_unlock(&desc->lock); |
483 | return; | 515 | return; |
484 | out: | 516 | out: |
485 | if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED)) | 517 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
486 | goto out_eoi; | 518 | chip->irq_eoi(&desc->irq_data); |
487 | goto out_unlock; | 519 | raw_spin_unlock(&desc->lock); |
488 | } | 520 | } |
489 | 521 | ||
490 | /** | 522 | /** |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 131ca176b497..635480270858 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -41,6 +41,7 @@ irqreturn_t no_action(int cpl, void *dev_id) | |||
41 | { | 41 | { |
42 | return IRQ_NONE; | 42 | return IRQ_NONE; |
43 | } | 43 | } |
44 | EXPORT_SYMBOL_GPL(no_action); | ||
44 | 45 | ||
45 | static void warn_no_thread(unsigned int irq, struct irqaction *action) | 46 | static void warn_no_thread(unsigned int irq, struct irqaction *action) |
46 | { | 47 | { |
@@ -51,7 +52,7 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action) | |||
51 | "but no thread function available.", irq, action->name); | 52 | "but no thread function available.", irq, action->name); |
52 | } | 53 | } |
53 | 54 | ||
54 | static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action) | 55 | void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action) |
55 | { | 56 | { |
56 | /* | 57 | /* |
57 | * In case the thread crashed and was killed we just pretend that | 58 | * In case the thread crashed and was killed we just pretend that |
@@ -157,7 +158,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) | |||
157 | break; | 158 | break; |
158 | } | 159 | } |
159 | 160 | ||
160 | irq_wake_thread(desc, action); | 161 | __irq_wake_thread(desc, action); |
161 | 162 | ||
162 | /* Fall through to add to randomness */ | 163 | /* Fall through to add to randomness */ |
163 | case IRQ_HANDLED: | 164 | case IRQ_HANDLED: |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 001fa5bab490..ddf1ffeb79f1 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -6,6 +6,7 @@ | |||
6 | * of this file for your non core code. | 6 | * of this file for your non core code. |
7 | */ | 7 | */ |
8 | #include <linux/irqdesc.h> | 8 | #include <linux/irqdesc.h> |
9 | #include <linux/kernel_stat.h> | ||
9 | 10 | ||
10 | #ifdef CONFIG_SPARSE_IRQ | 11 | #ifdef CONFIG_SPARSE_IRQ |
11 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) | 12 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) |
@@ -73,6 +74,7 @@ extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu); | |||
73 | extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); | 74 | extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); |
74 | extern void mask_irq(struct irq_desc *desc); | 75 | extern void mask_irq(struct irq_desc *desc); |
75 | extern void unmask_irq(struct irq_desc *desc); | 76 | extern void unmask_irq(struct irq_desc *desc); |
77 | extern void unmask_threaded_irq(struct irq_desc *desc); | ||
76 | 78 | ||
77 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | 79 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
78 | 80 | ||
@@ -82,6 +84,7 @@ irqreturn_t handle_irq_event(struct irq_desc *desc); | |||
82 | /* Resending of interrupts :*/ | 84 | /* Resending of interrupts :*/ |
83 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | 85 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); |
84 | bool irq_wait_for_poll(struct irq_desc *desc); | 86 | bool irq_wait_for_poll(struct irq_desc *desc); |
87 | void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); | ||
85 | 88 | ||
86 | #ifdef CONFIG_PROC_FS | 89 | #ifdef CONFIG_PROC_FS |
87 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); | 90 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); |
@@ -179,3 +182,9 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) | |||
179 | { | 182 | { |
180 | return d->state_use_accessors & mask; | 183 | return d->state_use_accessors & mask; |
181 | } | 184 | } |
185 | |||
186 | static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc) | ||
187 | { | ||
188 | __this_cpu_inc(*desc->kstat_irqs); | ||
189 | __this_cpu_inc(kstat.irqs_sum); | ||
190 | } | ||
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 8ab8e9390297..a7174617616b 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -489,6 +489,11 @@ void dynamic_irq_cleanup(unsigned int irq) | |||
489 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 489 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
490 | } | 490 | } |
491 | 491 | ||
492 | void kstat_incr_irq_this_cpu(unsigned int irq) | ||
493 | { | ||
494 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); | ||
495 | } | ||
496 | |||
492 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | 497 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) |
493 | { | 498 | { |
494 | struct irq_desc *desc = irq_to_desc(irq); | 499 | struct irq_desc *desc = irq_to_desc(irq); |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d3bf660cb57f..2486a4c1a710 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -32,24 +32,10 @@ static int __init setup_forced_irqthreads(char *arg) | |||
32 | early_param("threadirqs", setup_forced_irqthreads); | 32 | early_param("threadirqs", setup_forced_irqthreads); |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /** | 35 | static void __synchronize_hardirq(struct irq_desc *desc) |
36 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | ||
37 | * @irq: interrupt number to wait for | ||
38 | * | ||
39 | * This function waits for any pending IRQ handlers for this interrupt | ||
40 | * to complete before returning. If you use this function while | ||
41 | * holding a resource the IRQ handler may need you will deadlock. | ||
42 | * | ||
43 | * This function may be called - with care - from IRQ context. | ||
44 | */ | ||
45 | void synchronize_irq(unsigned int irq) | ||
46 | { | 36 | { |
47 | struct irq_desc *desc = irq_to_desc(irq); | ||
48 | bool inprogress; | 37 | bool inprogress; |
49 | 38 | ||
50 | if (!desc) | ||
51 | return; | ||
52 | |||
53 | do { | 39 | do { |
54 | unsigned long flags; | 40 | unsigned long flags; |
55 | 41 | ||
@@ -67,12 +53,56 @@ void synchronize_irq(unsigned int irq) | |||
67 | 53 | ||
68 | /* Oops, that failed? */ | 54 | /* Oops, that failed? */ |
69 | } while (inprogress); | 55 | } while (inprogress); |
56 | } | ||
70 | 57 | ||
71 | /* | 58 | /** |
72 | * We made sure that no hardirq handler is running. Now verify | 59 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) |
73 | * that no threaded handlers are active. | 60 | * @irq: interrupt number to wait for |
74 | */ | 61 | * |
75 | wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); | 62 | * This function waits for any pending hard IRQ handlers for this |
63 | * interrupt to complete before returning. If you use this | ||
64 | * function while holding a resource the IRQ handler may need you | ||
65 | * will deadlock. It does not take associated threaded handlers | ||
66 | * into account. | ||
67 | * | ||
68 | * Do not use this for shutdown scenarios where you must be sure | ||
69 | * that all parts (hardirq and threaded handler) have completed. | ||
70 | * | ||
71 | * This function may be called - with care - from IRQ context. | ||
72 | */ | ||
73 | void synchronize_hardirq(unsigned int irq) | ||
74 | { | ||
75 | struct irq_desc *desc = irq_to_desc(irq); | ||
76 | |||
77 | if (desc) | ||
78 | __synchronize_hardirq(desc); | ||
79 | } | ||
80 | EXPORT_SYMBOL(synchronize_hardirq); | ||
81 | |||
82 | /** | ||
83 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | ||
84 | * @irq: interrupt number to wait for | ||
85 | * | ||
86 | * This function waits for any pending IRQ handlers for this interrupt | ||
87 | * to complete before returning. If you use this function while | ||
88 | * holding a resource the IRQ handler may need you will deadlock. | ||
89 | * | ||
90 | * This function may be called - with care - from IRQ context. | ||
91 | */ | ||
92 | void synchronize_irq(unsigned int irq) | ||
93 | { | ||
94 | struct irq_desc *desc = irq_to_desc(irq); | ||
95 | |||
96 | if (desc) { | ||
97 | __synchronize_hardirq(desc); | ||
98 | /* | ||
99 | * We made sure that no hardirq handler is | ||
100 | * running. Now verify that no threaded handlers are | ||
101 | * active. | ||
102 | */ | ||
103 | wait_event(desc->wait_for_threads, | ||
104 | !atomic_read(&desc->threads_active)); | ||
105 | } | ||
76 | } | 106 | } |
77 | EXPORT_SYMBOL(synchronize_irq); | 107 | EXPORT_SYMBOL(synchronize_irq); |
78 | 108 | ||
@@ -718,7 +748,7 @@ again: | |||
718 | 748 | ||
719 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | 749 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
720 | irqd_irq_masked(&desc->irq_data)) | 750 | irqd_irq_masked(&desc->irq_data)) |
721 | unmask_irq(desc); | 751 | unmask_threaded_irq(desc); |
722 | 752 | ||
723 | out_unlock: | 753 | out_unlock: |
724 | raw_spin_unlock_irq(&desc->lock); | 754 | raw_spin_unlock_irq(&desc->lock); |
@@ -727,7 +757,7 @@ out_unlock: | |||
727 | 757 | ||
728 | #ifdef CONFIG_SMP | 758 | #ifdef CONFIG_SMP |
729 | /* | 759 | /* |
730 | * Check whether we need to chasnge the affinity of the interrupt thread. | 760 | * Check whether we need to change the affinity of the interrupt thread. |
731 | */ | 761 | */ |
732 | static void | 762 | static void |
733 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 763 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
@@ -880,6 +910,33 @@ static int irq_thread(void *data) | |||
880 | return 0; | 910 | return 0; |
881 | } | 911 | } |
882 | 912 | ||
913 | /** | ||
914 | * irq_wake_thread - wake the irq thread for the action identified by dev_id | ||
915 | * @irq: Interrupt line | ||
916 | * @dev_id: Device identity for which the thread should be woken | ||
917 | * | ||
918 | */ | ||
919 | void irq_wake_thread(unsigned int irq, void *dev_id) | ||
920 | { | ||
921 | struct irq_desc *desc = irq_to_desc(irq); | ||
922 | struct irqaction *action; | ||
923 | unsigned long flags; | ||
924 | |||
925 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | ||
926 | return; | ||
927 | |||
928 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
929 | for (action = desc->action; action; action = action->next) { | ||
930 | if (action->dev_id == dev_id) { | ||
931 | if (action->thread) | ||
932 | __irq_wake_thread(desc, action); | ||
933 | break; | ||
934 | } | ||
935 | } | ||
936 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
937 | } | ||
938 | EXPORT_SYMBOL_GPL(irq_wake_thread); | ||
939 | |||
883 | static void irq_setup_forced_threading(struct irqaction *new) | 940 | static void irq_setup_forced_threading(struct irqaction *new) |
884 | { | 941 | { |
885 | if (!force_irqthreads) | 942 | if (!force_irqthreads) |
@@ -896,6 +953,23 @@ static void irq_setup_forced_threading(struct irqaction *new) | |||
896 | } | 953 | } |
897 | } | 954 | } |
898 | 955 | ||
956 | static int irq_request_resources(struct irq_desc *desc) | ||
957 | { | ||
958 | struct irq_data *d = &desc->irq_data; | ||
959 | struct irq_chip *c = d->chip; | ||
960 | |||
961 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | ||
962 | } | ||
963 | |||
964 | static void irq_release_resources(struct irq_desc *desc) | ||
965 | { | ||
966 | struct irq_data *d = &desc->irq_data; | ||
967 | struct irq_chip *c = d->chip; | ||
968 | |||
969 | if (c->irq_release_resources) | ||
970 | c->irq_release_resources(d); | ||
971 | } | ||
972 | |||
899 | /* | 973 | /* |
900 | * Internal function to register an irqaction - typically used to | 974 | * Internal function to register an irqaction - typically used to |
901 | * allocate special interrupts that are part of the architecture. | 975 | * allocate special interrupts that are part of the architecture. |
@@ -1091,6 +1165,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1091 | } | 1165 | } |
1092 | 1166 | ||
1093 | if (!shared) { | 1167 | if (!shared) { |
1168 | ret = irq_request_resources(desc); | ||
1169 | if (ret) { | ||
1170 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | ||
1171 | new->name, irq, desc->irq_data.chip->name); | ||
1172 | goto out_mask; | ||
1173 | } | ||
1174 | |||
1094 | init_waitqueue_head(&desc->wait_for_threads); | 1175 | init_waitqueue_head(&desc->wait_for_threads); |
1095 | 1176 | ||
1096 | /* Setup the type (level, edge polarity) if configured: */ | 1177 | /* Setup the type (level, edge polarity) if configured: */ |
@@ -1261,8 +1342,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
1261 | *action_ptr = action->next; | 1342 | *action_ptr = action->next; |
1262 | 1343 | ||
1263 | /* If this was the last handler, shut down the IRQ line: */ | 1344 | /* If this was the last handler, shut down the IRQ line: */ |
1264 | if (!desc->action) | 1345 | if (!desc->action) { |
1265 | irq_shutdown(desc); | 1346 | irq_shutdown(desc); |
1347 | irq_release_resources(desc); | ||
1348 | } | ||
1266 | 1349 | ||
1267 | #ifdef CONFIG_SMP | 1350 | #ifdef CONFIG_SMP |
1268 | /* make sure affinity_hint is cleaned up */ | 1351 | /* make sure affinity_hint is cleaned up */ |
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 36f6ee181b0c..ac1ba2f11032 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -324,15 +324,15 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
324 | 324 | ||
325 | #ifdef CONFIG_SMP | 325 | #ifdef CONFIG_SMP |
326 | /* create /proc/irq/<irq>/smp_affinity */ | 326 | /* create /proc/irq/<irq>/smp_affinity */ |
327 | proc_create_data("smp_affinity", 0600, desc->dir, | 327 | proc_create_data("smp_affinity", 0644, desc->dir, |
328 | &irq_affinity_proc_fops, (void *)(long)irq); | 328 | &irq_affinity_proc_fops, (void *)(long)irq); |
329 | 329 | ||
330 | /* create /proc/irq/<irq>/affinity_hint */ | 330 | /* create /proc/irq/<irq>/affinity_hint */ |
331 | proc_create_data("affinity_hint", 0400, desc->dir, | 331 | proc_create_data("affinity_hint", 0444, desc->dir, |
332 | &irq_affinity_hint_proc_fops, (void *)(long)irq); | 332 | &irq_affinity_hint_proc_fops, (void *)(long)irq); |
333 | 333 | ||
334 | /* create /proc/irq/<irq>/smp_affinity_list */ | 334 | /* create /proc/irq/<irq>/smp_affinity_list */ |
335 | proc_create_data("smp_affinity_list", 0600, desc->dir, | 335 | proc_create_data("smp_affinity_list", 0644, desc->dir, |
336 | &irq_affinity_list_proc_fops, (void *)(long)irq); | 336 | &irq_affinity_list_proc_fops, (void *)(long)irq); |
337 | 337 | ||
338 | proc_create_data("node", 0444, desc->dir, | 338 | proc_create_data("node", 0444, desc->dir, |
@@ -372,7 +372,7 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action) | |||
372 | static void register_default_affinity_proc(void) | 372 | static void register_default_affinity_proc(void) |
373 | { | 373 | { |
374 | #ifdef CONFIG_SMP | 374 | #ifdef CONFIG_SMP |
375 | proc_create("irq/default_smp_affinity", 0600, NULL, | 375 | proc_create("irq/default_smp_affinity", 0644, NULL, |
376 | &default_affinity_proc_fops); | 376 | &default_affinity_proc_fops); |
377 | #endif | 377 | #endif |
378 | } | 378 | } |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 490fcbb1dc5b..b50990a5bea0 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/smpboot.h> | 26 | #include <linux/smpboot.h> |
27 | #include <linux/tick.h> | 27 | #include <linux/tick.h> |
28 | #include <linux/irq.h> | ||
28 | 29 | ||
29 | #define CREATE_TRACE_POINTS | 30 | #define CREATE_TRACE_POINTS |
30 | #include <trace/events/irq.h> | 31 | #include <trace/events/irq.h> |