35 files changed, 2112 insertions, 1120 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f4a04c0c7edc..738c6fda3fb0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
| @@ -2444,6 +2444,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
| 2444 | <deci-seconds>: poll all this frequency | 2444 | <deci-seconds>: poll all this frequency |
| 2445 | 0: no polling (default) | 2445 | 0: no polling (default) |
| 2446 | 2446 | ||
| 2447 | threadirqs [KNL] | ||
| 2448 | Force threading of all interrupt handlers except those | ||
| 2449 | marked explicitly IRQF_NO_THREAD. | ||
| 2450 | |||
| 2447 | topology= [S390] | 2451 | topology= [S390] |
| 2448 | Format: {off | on} | 2452 | Format: {off | on} |
| 2449 | Specify if the kernel should make use of the cpu | 2453 | Specify if the kernel should make use of the cpu |
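The threadirqs documentation added above pairs with the IRQF_NO_THREAD flag used in the irqinit.c and visws hunks later in this diff. A minimal sketch of a driver opting out of forced threading; the IRQ number, device name, and handler are hypothetical, not part of this patch:

    #include <linux/interrupt.h>
    #include <linux/module.h>

    #define DEMO_IRQ 17 /* hypothetical IRQ line */

    static irqreturn_t demo_handler(int irq, void *dev_id)
    {
            /* stays in hard-irq context even when booted with threadirqs */
            return IRQ_HANDLED;
    }

    static int __init demo_init(void)
    {
            /* IRQF_NO_THREAD exempts this handler from forced threading */
            return request_irq(DEMO_IRQ, demo_handler, IRQF_NO_THREAD,
                               "demo", NULL);
    }

    static void __exit demo_exit(void)
    {
            free_irq(DEMO_IRQ, NULL);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");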
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 43214e303294..a42660c7356a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
| @@ -68,6 +68,8 @@ config X86 | |||
| 68 | select GENERIC_FIND_NEXT_BIT | 68 | select GENERIC_FIND_NEXT_BIT |
| 69 | select GENERIC_IRQ_PROBE | 69 | select GENERIC_IRQ_PROBE |
| 70 | select GENERIC_PENDING_IRQ if SMP | 70 | select GENERIC_PENDING_IRQ if SMP |
| 71 | select GENERIC_IRQ_SHOW | ||
| 72 | select IRQ_FORCED_THREADING | ||
| 71 | select USE_GENERIC_SMP_HELPERS if SMP | 73 | select USE_GENERIC_SMP_HELPERS if SMP |
| 72 | 74 | ||
| 73 | config INSTRUCTION_DECODER | 75 | config INSTRUCTION_DECODER |
| @@ -813,7 +815,7 @@ config X86_LOCAL_APIC | |||
| 813 | 815 | ||
| 814 | config X86_IO_APIC | 816 | config X86_IO_APIC |
| 815 | def_bool y | 817 | def_bool y |
| 816 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC | 818 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC |
| 817 | 819 | ||
| 818 | config X86_VISWS_APIC | 820 | config X86_VISWS_APIC |
| 819 | def_bool y | 821 | def_bool y |
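Two of the new selects matter for the rest of this diff: GENERIC_IRQ_SHOW lets the generic code provide show_interrupts() (the x86 copy is deleted in the irq.c hunk below), and IRQ_FORCED_THREADING enables the threadirqs switch documented above. The core's gating test, paraphrased as a sketch rather than the verbatim kernel/irq/manage.c code:

    /* force_irqthreads is set by the "threadirqs" early param when the
     * architecture selects IRQ_FORCED_THREADING */
    static bool irq_should_be_threaded(struct irqaction *action)
    {
            return force_irqthreads && !(action->flags & IRQF_NO_THREAD);
    }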
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 47a30ff8e517..d87988bacf3e 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
| @@ -426,4 +426,16 @@ struct local_apic { | |||
| 426 | #else | 426 | #else |
| 427 | #define BAD_APICID 0xFFFFu | 427 | #define BAD_APICID 0xFFFFu |
| 428 | #endif | 428 | #endif |
| 429 | |||
| 430 | enum ioapic_irq_destination_types { | ||
| 431 | dest_Fixed = 0, | ||
| 432 | dest_LowestPrio = 1, | ||
| 433 | dest_SMI = 2, | ||
| 434 | dest__reserved_1 = 3, | ||
| 435 | dest_NMI = 4, | ||
| 436 | dest_INIT = 5, | ||
| 437 | dest__reserved_2 = 6, | ||
| 438 | dest_ExtINT = 7 | ||
| 439 | }; | ||
| 440 | |||
| 429 | #endif /* _ASM_X86_APICDEF_H */ | 441 | #endif /* _ASM_X86_APICDEF_H */ |
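Moving the destination-type enum from io_apic.h into apicdef.h makes the delivery-mode names visible to any code that includes <asm/apicdef.h>, not just IO-APIC code. An illustrative use (not from this patch; the vector value is made up) together with the IO_APIC_route_entry bitfields that stay in io_apic.h:

    /* program a fixed-delivery, edge-triggered, initially masked entry */
    struct IO_APIC_route_entry entry = {
            .vector        = 0x30,
            .delivery_mode = dest_Fixed, /* ioapic_irq_destination_types */
            .dest_mode     = 0,          /* physical destination */
            .trigger       = 0,          /* edge */
            .mask          = 1,          /* unmask after routing is final */
    };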
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index f327d386d6cc..c4bd267dfc50 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
| @@ -63,17 +63,6 @@ union IO_APIC_reg_03 { | |||
| 63 | } __attribute__ ((packed)) bits; | 63 | } __attribute__ ((packed)) bits; |
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | enum ioapic_irq_destination_types { | ||
| 67 | dest_Fixed = 0, | ||
| 68 | dest_LowestPrio = 1, | ||
| 69 | dest_SMI = 2, | ||
| 70 | dest__reserved_1 = 3, | ||
| 71 | dest_NMI = 4, | ||
| 72 | dest_INIT = 5, | ||
| 73 | dest__reserved_2 = 6, | ||
| 74 | dest_ExtINT = 7 | ||
| 75 | }; | ||
| 76 | |||
| 77 | struct IO_APIC_route_entry { | 66 | struct IO_APIC_route_entry { |
| 78 | __u32 vector : 8, | 67 | __u32 vector : 8, |
| 79 | delivery_mode : 3, /* 000: FIXED | 68 | delivery_mode : 3, /* 000: FIXED |
| @@ -106,6 +95,10 @@ struct IR_IO_APIC_route_entry { | |||
| 106 | index : 15; | 95 | index : 15; |
| 107 | } __attribute__ ((packed)); | 96 | } __attribute__ ((packed)); |
| 108 | 97 | ||
| 98 | #define IOAPIC_AUTO -1 | ||
| 99 | #define IOAPIC_EDGE 0 | ||
| 100 | #define IOAPIC_LEVEL 1 | ||
| 101 | |||
| 109 | #ifdef CONFIG_X86_IO_APIC | 102 | #ifdef CONFIG_X86_IO_APIC |
| 110 | 103 | ||
| 111 | /* | 104 | /* |
| @@ -150,11 +143,6 @@ extern int timer_through_8259; | |||
| 150 | #define io_apic_assign_pci_irqs \ | 143 | #define io_apic_assign_pci_irqs \ |
| 151 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | 144 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) |
| 152 | 145 | ||
| 153 | extern u8 io_apic_unique_id(u8 id); | ||
| 154 | extern int io_apic_get_unique_id(int ioapic, int apic_id); | ||
| 155 | extern int io_apic_get_version(int ioapic); | ||
| 156 | extern int io_apic_get_redir_entries(int ioapic); | ||
| 157 | |||
| 158 | struct io_apic_irq_attr; | 146 | struct io_apic_irq_attr; |
| 159 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | 147 | extern int io_apic_set_pci_routing(struct device *dev, int irq, |
| 160 | struct io_apic_irq_attr *irq_attr); | 148 | struct io_apic_irq_attr *irq_attr); |
| @@ -162,6 +150,8 @@ void setup_IO_APIC_irq_extra(u32 gsi); | |||
| 162 | extern void ioapic_and_gsi_init(void); | 150 | extern void ioapic_and_gsi_init(void); |
| 163 | extern void ioapic_insert_resources(void); | 151 | extern void ioapic_insert_resources(void); |
| 164 | 152 | ||
| 153 | int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr); | ||
| 154 | |||
| 165 | extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); | 155 | extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); |
| 166 | extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); | 156 | extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); |
| 167 | extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); | 157 | extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); |
| @@ -186,6 +176,8 @@ extern void __init pre_init_apic_IRQ0(void); | |||
| 186 | 176 | ||
| 187 | extern void mp_save_irq(struct mpc_intsrc *m); | 177 | extern void mp_save_irq(struct mpc_intsrc *m); |
| 188 | 178 | ||
| 179 | extern void disable_ioapic_support(void); | ||
| 180 | |||
| 189 | #else /* !CONFIG_X86_IO_APIC */ | 181 | #else /* !CONFIG_X86_IO_APIC */ |
| 190 | 182 | ||
| 191 | #define io_apic_assign_pci_irqs 0 | 183 | #define io_apic_assign_pci_irqs 0 |
| @@ -199,6 +191,26 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; } | |||
| 199 | struct io_apic_irq_attr; | 191 | struct io_apic_irq_attr; |
| 200 | static inline int io_apic_set_pci_routing(struct device *dev, int irq, | 192 | static inline int io_apic_set_pci_routing(struct device *dev, int irq, |
| 201 | struct io_apic_irq_attr *irq_attr) { return 0; } | 193 | struct io_apic_irq_attr *irq_attr) { return 0; } |
| 194 | |||
| 195 | static inline struct IO_APIC_route_entry **alloc_ioapic_entries(void) | ||
| 196 | { | ||
| 197 | return NULL; | ||
| 198 | } | ||
| 199 | |||
| 200 | static inline void free_ioapic_entries(struct IO_APIC_route_entry **ent) { } | ||
| 201 | static inline int save_IO_APIC_setup(struct IO_APIC_route_entry **ent) | ||
| 202 | { | ||
| 203 | return -ENOMEM; | ||
| 204 | } | ||
| 205 | |||
| 206 | static inline void mask_IO_APIC_setup(struct IO_APIC_route_entry **ent) { } | ||
| 207 | static inline int restore_IO_APIC_setup(struct IO_APIC_route_entry **ent) | ||
| 208 | { | ||
| 209 | return -ENOMEM; | ||
| 210 | } | ||
| 211 | |||
| 212 | static inline void mp_save_irq(struct mpc_intsrc *m) { }; | ||
| 213 | static inline void disable_ioapic_support(void) { } | ||
| 202 | #endif | 214 | #endif |
| 203 | 215 | ||
| 204 | #endif /* _ASM_X86_IO_APIC_H */ | 216 | #endif /* _ASM_X86_IO_APIC_H */ |
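The new !CONFIG_X86_IO_APIC stubs exist so callers can drop their #ifdefs and defensive NULL initializations (see the enable_IR_x2apic() hunk in apic.c below). A hedged sketch of the calling pattern the stubs permit:

    struct IO_APIC_route_entry **entries;

    entries = alloc_ioapic_entries(); /* stub returns NULL when IO-APIC is off */
    if (entries) {
            if (!save_IO_APIC_setup(entries)) /* stub returns -ENOMEM */
                    mask_IO_APIC_setup(entries);
            free_ioapic_entries(entries);
    }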
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3606feb7d67c..48dcd2e83b46 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <asm/i8259.h> | 43 | #include <asm/i8259.h> |
| 44 | #include <asm/proto.h> | 44 | #include <asm/proto.h> |
| 45 | #include <asm/apic.h> | 45 | #include <asm/apic.h> |
| 46 | #include <asm/io_apic.h> | ||
| 46 | #include <asm/desc.h> | 47 | #include <asm/desc.h> |
| 47 | #include <asm/hpet.h> | 48 | #include <asm/hpet.h> |
| 48 | #include <asm/idle.h> | 49 | #include <asm/idle.h> |
| @@ -1209,7 +1210,7 @@ void __cpuinit setup_local_APIC(void) | |||
| 1209 | rdtscll(tsc); | 1210 | rdtscll(tsc); |
| 1210 | 1211 | ||
| 1211 | if (disable_apic) { | 1212 | if (disable_apic) { |
| 1212 | arch_disable_smp_support(); | 1213 | disable_ioapic_support(); |
| 1213 | return; | 1214 | return; |
| 1214 | } | 1215 | } |
| 1215 | 1216 | ||
| @@ -1448,7 +1449,7 @@ int __init enable_IR(void) | |||
| 1448 | void __init enable_IR_x2apic(void) | 1449 | void __init enable_IR_x2apic(void) |
| 1449 | { | 1450 | { |
| 1450 | unsigned long flags; | 1451 | unsigned long flags; |
| 1451 | struct IO_APIC_route_entry **ioapic_entries = NULL; | 1452 | struct IO_APIC_route_entry **ioapic_entries; |
| 1452 | int ret, x2apic_enabled = 0; | 1453 | int ret, x2apic_enabled = 0; |
| 1453 | int dmar_table_init_ret; | 1454 | int dmar_table_init_ret; |
| 1454 | 1455 | ||
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ca9e2a3545a9..4b5ebd26f565 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
| @@ -108,7 +108,10 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | |||
| 108 | 108 | ||
| 109 | int skip_ioapic_setup; | 109 | int skip_ioapic_setup; |
| 110 | 110 | ||
| 111 | void arch_disable_smp_support(void) | 111 | /** |
| 112 | * disable_ioapic_support() - disables ioapic support at runtime | ||
| 113 | */ | ||
| 114 | void disable_ioapic_support(void) | ||
| 112 | { | 115 | { |
| 113 | #ifdef CONFIG_PCI | 116 | #ifdef CONFIG_PCI |
| 114 | noioapicquirk = 1; | 117 | noioapicquirk = 1; |
| @@ -120,11 +123,14 @@ void arch_disable_smp_support(void) | |||
| 120 | static int __init parse_noapic(char *str) | 123 | static int __init parse_noapic(char *str) |
| 121 | { | 124 | { |
| 122 | /* disable IO-APIC */ | 125 | /* disable IO-APIC */ |
| 123 | arch_disable_smp_support(); | 126 | disable_ioapic_support(); |
| 124 | return 0; | 127 | return 0; |
| 125 | } | 128 | } |
| 126 | early_param("noapic", parse_noapic); | 129 | early_param("noapic", parse_noapic); |
| 127 | 130 | ||
| 131 | static int io_apic_setup_irq_pin_once(unsigned int irq, int node, | ||
| 132 | struct io_apic_irq_attr *attr); | ||
| 133 | |||
| 128 | /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ | 134 | /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ |
| 129 | void mp_save_irq(struct mpc_intsrc *m) | 135 | void mp_save_irq(struct mpc_intsrc *m) |
| 130 | { | 136 | { |
| @@ -181,7 +187,7 @@ int __init arch_early_irq_init(void) | |||
| 181 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); | 187 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); |
| 182 | 188 | ||
| 183 | for (i = 0; i < count; i++) { | 189 | for (i = 0; i < count; i++) { |
| 184 | set_irq_chip_data(i, &cfg[i]); | 190 | irq_set_chip_data(i, &cfg[i]); |
| 185 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); | 191 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); |
| 186 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); | 192 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); |
| 187 | /* | 193 | /* |
| @@ -200,7 +206,7 @@ int __init arch_early_irq_init(void) | |||
| 200 | #ifdef CONFIG_SPARSE_IRQ | 206 | #ifdef CONFIG_SPARSE_IRQ |
| 201 | static struct irq_cfg *irq_cfg(unsigned int irq) | 207 | static struct irq_cfg *irq_cfg(unsigned int irq) |
| 202 | { | 208 | { |
| 203 | return get_irq_chip_data(irq); | 209 | return irq_get_chip_data(irq); |
| 204 | } | 210 | } |
| 205 | 211 | ||
| 206 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) | 212 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
| @@ -226,7 +232,7 @@ static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) | |||
| 226 | { | 232 | { |
| 227 | if (!cfg) | 233 | if (!cfg) |
| 228 | return; | 234 | return; |
| 229 | set_irq_chip_data(at, NULL); | 235 | irq_set_chip_data(at, NULL); |
| 230 | free_cpumask_var(cfg->domain); | 236 | free_cpumask_var(cfg->domain); |
| 231 | free_cpumask_var(cfg->old_domain); | 237 | free_cpumask_var(cfg->old_domain); |
| 232 | kfree(cfg); | 238 | kfree(cfg); |
| @@ -256,14 +262,14 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) | |||
| 256 | if (res < 0) { | 262 | if (res < 0) { |
| 257 | if (res != -EEXIST) | 263 | if (res != -EEXIST) |
| 258 | return NULL; | 264 | return NULL; |
| 259 | cfg = get_irq_chip_data(at); | 265 | cfg = irq_get_chip_data(at); |
| 260 | if (cfg) | 266 | if (cfg) |
| 261 | return cfg; | 267 | return cfg; |
| 262 | } | 268 | } |
| 263 | 269 | ||
| 264 | cfg = alloc_irq_cfg(at, node); | 270 | cfg = alloc_irq_cfg(at, node); |
| 265 | if (cfg) | 271 | if (cfg) |
| 266 | set_irq_chip_data(at, cfg); | 272 | irq_set_chip_data(at, cfg); |
| 267 | else | 273 | else |
| 268 | irq_free_desc(at); | 274 | irq_free_desc(at); |
| 269 | return cfg; | 275 | return cfg; |
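The remainder of this file (and the hpet, i8259, lguest, uv and visws hunks later in the diff) performs the same mechanical conversion to the renamed genirq accessors. The mapping, as far as this diff shows it:

    set_irq_chip()                  ->  irq_set_chip()
    set_irq_chip_data()             ->  irq_set_chip_data()
    get_irq_chip_data()             ->  irq_get_chip_data()
    set_irq_chip_and_handler_name() ->  irq_set_chip_and_handler_name()
    set_irq_msi()                   ->  irq_set_msi_desc()
    set_irq_data()                  ->  irq_set_handler_data()
    get_irq_data()                  ->  irq_get_handler_data()
    set_irq_handler()               ->  irq_set_handler()
    move_native_irq(irq)            ->  irq_move_irq(irq_data)
    move_masked_irq(irq)            ->  irq_move_masked_irq(irq_data)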
| @@ -818,7 +824,7 @@ static int EISA_ELCR(unsigned int irq) | |||
| 818 | #define default_MCA_trigger(idx) (1) | 824 | #define default_MCA_trigger(idx) (1) |
| 819 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) | 825 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) |
| 820 | 826 | ||
| 821 | static int MPBIOS_polarity(int idx) | 827 | static int irq_polarity(int idx) |
| 822 | { | 828 | { |
| 823 | int bus = mp_irqs[idx].srcbus; | 829 | int bus = mp_irqs[idx].srcbus; |
| 824 | int polarity; | 830 | int polarity; |
| @@ -860,7 +866,7 @@ static int MPBIOS_polarity(int idx) | |||
| 860 | return polarity; | 866 | return polarity; |
| 861 | } | 867 | } |
| 862 | 868 | ||
| 863 | static int MPBIOS_trigger(int idx) | 869 | static int irq_trigger(int idx) |
| 864 | { | 870 | { |
| 865 | int bus = mp_irqs[idx].srcbus; | 871 | int bus = mp_irqs[idx].srcbus; |
| 866 | int trigger; | 872 | int trigger; |
| @@ -932,16 +938,6 @@ static int MPBIOS_trigger(int idx) | |||
| 932 | return trigger; | 938 | return trigger; |
| 933 | } | 939 | } |
| 934 | 940 | ||
| 935 | static inline int irq_polarity(int idx) | ||
| 936 | { | ||
| 937 | return MPBIOS_polarity(idx); | ||
| 938 | } | ||
| 939 | |||
| 940 | static inline int irq_trigger(int idx) | ||
| 941 | { | ||
| 942 | return MPBIOS_trigger(idx); | ||
| 943 | } | ||
| 944 | |||
| 945 | static int pin_2_irq(int idx, int apic, int pin) | 941 | static int pin_2_irq(int idx, int apic, int pin) |
| 946 | { | 942 | { |
| 947 | int irq; | 943 | int irq; |
| @@ -1189,7 +1185,7 @@ void __setup_vector_irq(int cpu) | |||
| 1189 | raw_spin_lock(&vector_lock); | 1185 | raw_spin_lock(&vector_lock); |
| 1190 | /* Mark the inuse vectors */ | 1186 | /* Mark the inuse vectors */ |
| 1191 | for_each_active_irq(irq) { | 1187 | for_each_active_irq(irq) { |
| 1192 | cfg = get_irq_chip_data(irq); | 1188 | cfg = irq_get_chip_data(irq); |
| 1193 | if (!cfg) | 1189 | if (!cfg) |
| 1194 | continue; | 1190 | continue; |
| 1195 | /* | 1191 | /* |
| @@ -1220,10 +1216,6 @@ void __setup_vector_irq(int cpu) | |||
| 1220 | static struct irq_chip ioapic_chip; | 1216 | static struct irq_chip ioapic_chip; |
| 1221 | static struct irq_chip ir_ioapic_chip; | 1217 | static struct irq_chip ir_ioapic_chip; |
| 1222 | 1218 | ||
| 1223 | #define IOAPIC_AUTO -1 | ||
| 1224 | #define IOAPIC_EDGE 0 | ||
| 1225 | #define IOAPIC_LEVEL 1 | ||
| 1226 | |||
| 1227 | #ifdef CONFIG_X86_32 | 1219 | #ifdef CONFIG_X86_32 |
| 1228 | static inline int IO_APIC_irq_trigger(int irq) | 1220 | static inline int IO_APIC_irq_trigger(int irq) |
| 1229 | { | 1221 | { |
| @@ -1248,35 +1240,31 @@ static inline int IO_APIC_irq_trigger(int irq) | |||
| 1248 | } | 1240 | } |
| 1249 | #endif | 1241 | #endif |
| 1250 | 1242 | ||
| 1251 | static void ioapic_register_intr(unsigned int irq, unsigned long trigger) | 1243 | static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, |
| 1244 | unsigned long trigger) | ||
| 1252 | { | 1245 | { |
| 1246 | struct irq_chip *chip = &ioapic_chip; | ||
| 1247 | irq_flow_handler_t hdl; | ||
| 1248 | bool fasteoi; | ||
| 1253 | 1249 | ||
| 1254 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1250 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
| 1255 | trigger == IOAPIC_LEVEL) | 1251 | trigger == IOAPIC_LEVEL) { |
| 1256 | irq_set_status_flags(irq, IRQ_LEVEL); | 1252 | irq_set_status_flags(irq, IRQ_LEVEL); |
| 1257 | else | 1253 | fasteoi = true; |
| 1254 | } else { | ||
| 1258 | irq_clear_status_flags(irq, IRQ_LEVEL); | 1255 | irq_clear_status_flags(irq, IRQ_LEVEL); |
| 1256 | fasteoi = false; | ||
| 1257 | } | ||
| 1259 | 1258 | ||
| 1260 | if (irq_remapped(get_irq_chip_data(irq))) { | 1259 | if (irq_remapped(cfg)) { |
| 1261 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 1260 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
| 1262 | if (trigger) | 1261 | chip = &ir_ioapic_chip; |
| 1263 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | 1262 | fasteoi = trigger != 0; |
| 1264 | handle_fasteoi_irq, | ||
| 1265 | "fasteoi"); | ||
| 1266 | else | ||
| 1267 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | ||
| 1268 | handle_edge_irq, "edge"); | ||
| 1269 | return; | ||
| 1270 | } | 1263 | } |
| 1271 | 1264 | ||
| 1272 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1265 | hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; |
| 1273 | trigger == IOAPIC_LEVEL) | 1266 | irq_set_chip_and_handler_name(irq, chip, hdl, |
| 1274 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | 1267 | fasteoi ? "fasteoi" : "edge"); |
| 1275 | handle_fasteoi_irq, | ||
| 1276 | "fasteoi"); | ||
| 1277 | else | ||
| 1278 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | ||
| 1279 | handle_edge_irq, "edge"); | ||
| 1280 | } | 1268 | } |
| 1281 | 1269 | ||
| 1282 | static int setup_ioapic_entry(int apic_id, int irq, | 1270 | static int setup_ioapic_entry(int apic_id, int irq, |
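The rewritten ioapic_register_intr() above folds four nearly identical registration calls into one by choosing the chip and flow handler first; passing cfg in also saves the chip-data lookup that irq_remapped() previously needed. The consolidated idiom, sketched on its own (the function name and bool parameters stand in for the tests in the hunk):

    /* decide chip and flow handler once, then register once */
    static void register_intr_sketch(unsigned int irq, bool remapped, bool level)
    {
            struct irq_chip *chip = remapped ? &ir_ioapic_chip : &ioapic_chip;
            irq_flow_handler_t hdl = level ? handle_fasteoi_irq : handle_edge_irq;

            irq_set_chip_and_handler_name(irq, chip, hdl,
                                          level ? "fasteoi" : "edge");
    }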
| @@ -1374,7 +1362,7 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, | |||
| 1374 | return; | 1362 | return; |
| 1375 | } | 1363 | } |
| 1376 | 1364 | ||
| 1377 | ioapic_register_intr(irq, trigger); | 1365 | ioapic_register_intr(irq, cfg, trigger); |
| 1378 | if (irq < legacy_pic->nr_legacy_irqs) | 1366 | if (irq < legacy_pic->nr_legacy_irqs) |
| 1379 | legacy_pic->mask(irq); | 1367 | legacy_pic->mask(irq); |
| 1380 | 1368 | ||
| @@ -1385,33 +1373,26 @@ static struct { | |||
| 1385 | DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); | 1373 | DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); |
| 1386 | } mp_ioapic_routing[MAX_IO_APICS]; | 1374 | } mp_ioapic_routing[MAX_IO_APICS]; |
| 1387 | 1375 | ||
| 1388 | static void __init setup_IO_APIC_irqs(void) | 1376 | static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin) |
| 1389 | { | 1377 | { |
| 1390 | int apic_id, pin, idx, irq, notcon = 0; | 1378 | if (idx != -1) |
| 1391 | int node = cpu_to_node(0); | 1379 | return false; |
| 1392 | struct irq_cfg *cfg; | ||
| 1393 | 1380 | ||
| 1394 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1381 | apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", |
| 1382 | mp_ioapics[apic_id].apicid, pin); | ||
| 1383 | return true; | ||
| 1384 | } | ||
| 1385 | |||
| 1386 | static void __init __io_apic_setup_irqs(unsigned int apic_id) | ||
| 1387 | { | ||
| 1388 | int idx, node = cpu_to_node(0); | ||
| 1389 | struct io_apic_irq_attr attr; | ||
| 1390 | unsigned int pin, irq; | ||
| 1395 | 1391 | ||
| 1396 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) | ||
| 1397 | for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { | 1392 | for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { |
| 1398 | idx = find_irq_entry(apic_id, pin, mp_INT); | 1393 | idx = find_irq_entry(apic_id, pin, mp_INT); |
| 1399 | if (idx == -1) { | 1394 | if (io_apic_pin_not_connected(idx, apic_id, pin)) |
| 1400 | if (!notcon) { | ||
| 1401 | notcon = 1; | ||
| 1402 | apic_printk(APIC_VERBOSE, | ||
| 1403 | KERN_DEBUG " %d-%d", | ||
| 1404 | mp_ioapics[apic_id].apicid, pin); | ||
| 1405 | } else | ||
| 1406 | apic_printk(APIC_VERBOSE, " %d-%d", | ||
| 1407 | mp_ioapics[apic_id].apicid, pin); | ||
| 1408 | continue; | 1395 | continue; |
| 1409 | } | ||
| 1410 | if (notcon) { | ||
| 1411 | apic_printk(APIC_VERBOSE, | ||
| 1412 | " (apicid-pin) not connected\n"); | ||
| 1413 | notcon = 0; | ||
| 1414 | } | ||
| 1415 | 1396 | ||
| 1416 | irq = pin_2_irq(idx, apic_id, pin); | 1397 | irq = pin_2_irq(idx, apic_id, pin); |
| 1417 | 1398 | ||
| @@ -1423,25 +1404,24 @@ static void __init setup_IO_APIC_irqs(void) | |||
| 1423 | * installed and if it returns 1: | 1404 | * installed and if it returns 1: |
| 1424 | */ | 1405 | */ |
| 1425 | if (apic->multi_timer_check && | 1406 | if (apic->multi_timer_check && |
| 1426 | apic->multi_timer_check(apic_id, irq)) | 1407 | apic->multi_timer_check(apic_id, irq)) |
| 1427 | continue; | 1408 | continue; |
| 1428 | 1409 | ||
| 1429 | cfg = alloc_irq_and_cfg_at(irq, node); | 1410 | set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), |
| 1430 | if (!cfg) | 1411 | irq_polarity(idx)); |
| 1431 | continue; | ||
| 1432 | 1412 | ||
| 1433 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1413 | io_apic_setup_irq_pin(irq, node, &attr); |
| 1434 | /* | ||
| 1435 | * don't mark it in pin_programmed, so later acpi could | ||
| 1436 | * set it correctly when irq < 16 | ||
| 1437 | */ | ||
| 1438 | setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), | ||
| 1439 | irq_polarity(idx)); | ||
| 1440 | } | 1414 | } |
| 1415 | } | ||
| 1441 | 1416 | ||
| 1442 | if (notcon) | 1417 | static void __init setup_IO_APIC_irqs(void) |
| 1443 | apic_printk(APIC_VERBOSE, | 1418 | { |
| 1444 | " (apicid-pin) not connected\n"); | 1419 | unsigned int apic_id; |
| 1420 | |||
| 1421 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | ||
| 1422 | |||
| 1423 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) | ||
| 1424 | __io_apic_setup_irqs(apic_id); | ||
| 1445 | } | 1425 | } |
| 1446 | 1426 | ||
| 1447 | /* | 1427 | /* |
| @@ -1452,7 +1432,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
| 1452 | void setup_IO_APIC_irq_extra(u32 gsi) | 1432 | void setup_IO_APIC_irq_extra(u32 gsi) |
| 1453 | { | 1433 | { |
| 1454 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); | 1434 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); |
| 1455 | struct irq_cfg *cfg; | 1435 | struct io_apic_irq_attr attr; |
| 1456 | 1436 | ||
| 1457 | /* | 1437 | /* |
| 1458 | * Convert 'gsi' to 'ioapic.pin'. | 1438 | * Convert 'gsi' to 'ioapic.pin'. |
| @@ -1472,21 +1452,10 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
| 1472 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) | 1452 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) |
| 1473 | return; | 1453 | return; |
| 1474 | 1454 | ||
| 1475 | cfg = alloc_irq_and_cfg_at(irq, node); | 1455 | set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), |
| 1476 | if (!cfg) | 1456 | irq_polarity(idx)); |
| 1477 | return; | ||
| 1478 | |||
| 1479 | add_pin_to_irq_node(cfg, node, apic_id, pin); | ||
| 1480 | |||
| 1481 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { | ||
| 1482 | pr_debug("Pin %d-%d already programmed\n", | ||
| 1483 | mp_ioapics[apic_id].apicid, pin); | ||
| 1484 | return; | ||
| 1485 | } | ||
| 1486 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); | ||
| 1487 | 1457 | ||
| 1488 | setup_ioapic_irq(apic_id, pin, irq, cfg, | 1458 | io_apic_setup_irq_pin_once(irq, node, &attr); |
| 1489 | irq_trigger(idx), irq_polarity(idx)); | ||
| 1490 | } | 1459 | } |
| 1491 | 1460 | ||
| 1492 | /* | 1461 | /* |
| @@ -1518,7 +1487,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, | |||
| 1518 | * The timer IRQ doesn't have to know that behind the | 1487 | * The timer IRQ doesn't have to know that behind the |
| 1519 | * scene we may have a 8259A-master in AEOI mode ... | 1488 | * scene we may have a 8259A-master in AEOI mode ... |
| 1520 | */ | 1489 | */ |
| 1521 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 1490 | irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, |
| 1491 | "edge"); | ||
| 1522 | 1492 | ||
| 1523 | /* | 1493 | /* |
| 1524 | * Add it to the IO-APIC irq-routing table: | 1494 | * Add it to the IO-APIC irq-routing table: |
| @@ -1625,7 +1595,7 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
| 1625 | for_each_active_irq(irq) { | 1595 | for_each_active_irq(irq) { |
| 1626 | struct irq_pin_list *entry; | 1596 | struct irq_pin_list *entry; |
| 1627 | 1597 | ||
| 1628 | cfg = get_irq_chip_data(irq); | 1598 | cfg = irq_get_chip_data(irq); |
| 1629 | if (!cfg) | 1599 | if (!cfg) |
| 1630 | continue; | 1600 | continue; |
| 1631 | entry = cfg->irq_2_pin; | 1601 | entry = cfg->irq_2_pin; |
| @@ -2391,7 +2361,7 @@ static void irq_complete_move(struct irq_cfg *cfg) | |||
| 2391 | 2361 | ||
| 2392 | void irq_force_complete_move(int irq) | 2362 | void irq_force_complete_move(int irq) |
| 2393 | { | 2363 | { |
| 2394 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 2364 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
| 2395 | 2365 | ||
| 2396 | if (!cfg) | 2366 | if (!cfg) |
| 2397 | return; | 2367 | return; |
| @@ -2405,7 +2375,7 @@ static inline void irq_complete_move(struct irq_cfg *cfg) { } | |||
| 2405 | static void ack_apic_edge(struct irq_data *data) | 2375 | static void ack_apic_edge(struct irq_data *data) |
| 2406 | { | 2376 | { |
| 2407 | irq_complete_move(data->chip_data); | 2377 | irq_complete_move(data->chip_data); |
| 2408 | move_native_irq(data->irq); | 2378 | irq_move_irq(data); |
| 2409 | ack_APIC_irq(); | 2379 | ack_APIC_irq(); |
| 2410 | } | 2380 | } |
| 2411 | 2381 | ||
| @@ -2462,7 +2432,7 @@ static void ack_apic_level(struct irq_data *data) | |||
| 2462 | irq_complete_move(cfg); | 2432 | irq_complete_move(cfg); |
| 2463 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 2433 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 2464 | /* If we are moving the irq we need to mask it */ | 2434 | /* If we are moving the irq we need to mask it */ |
| 2465 | if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { | 2435 | if (unlikely(irqd_is_setaffinity_pending(data))) { |
| 2466 | do_unmask_irq = 1; | 2436 | do_unmask_irq = 1; |
| 2467 | mask_ioapic(cfg); | 2437 | mask_ioapic(cfg); |
| 2468 | } | 2438 | } |
| @@ -2551,7 +2521,7 @@ static void ack_apic_level(struct irq_data *data) | |||
| 2551 | * and you can go talk to the chipset vendor about it. | 2521 | * and you can go talk to the chipset vendor about it. |
| 2552 | */ | 2522 | */ |
| 2553 | if (!io_apic_level_ack_pending(cfg)) | 2523 | if (!io_apic_level_ack_pending(cfg)) |
| 2554 | move_masked_irq(irq); | 2524 | irq_move_masked_irq(data); |
| 2555 | unmask_ioapic(cfg); | 2525 | unmask_ioapic(cfg); |
| 2556 | } | 2526 | } |
| 2557 | } | 2527 | } |
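The two hunks above are the same cleanup: migration state moved from the descriptor's status field into struct irq_data, so chip callbacks test it through accessors instead of going back through irq_to_desc(). The before/after shape, as a sketch:

    /* old: poke the descriptor's status word directly */
    if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING))
            move_masked_irq(irq);

    /* new: ask the irq_data the chip callback already holds */
    if (unlikely(irqd_is_setaffinity_pending(data)))
            irq_move_masked_irq(data);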
| @@ -2614,7 +2584,7 @@ static inline void init_IO_APIC_traps(void) | |||
| 2614 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2584 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
| 2615 | */ | 2585 | */ |
| 2616 | for_each_active_irq(irq) { | 2586 | for_each_active_irq(irq) { |
| 2617 | cfg = get_irq_chip_data(irq); | 2587 | cfg = irq_get_chip_data(irq); |
| 2618 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { | 2588 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { |
| 2619 | /* | 2589 | /* |
| 2620 | * Hmm.. We don't have an entry for this, | 2590 | * Hmm.. We don't have an entry for this, |
| @@ -2625,7 +2595,7 @@ static inline void init_IO_APIC_traps(void) | |||
| 2625 | legacy_pic->make_irq(irq); | 2595 | legacy_pic->make_irq(irq); |
| 2626 | else | 2596 | else |
| 2627 | /* Strange. Oh, well.. */ | 2597 | /* Strange. Oh, well.. */ |
| 2628 | set_irq_chip(irq, &no_irq_chip); | 2598 | irq_set_chip(irq, &no_irq_chip); |
| 2629 | } | 2599 | } |
| 2630 | } | 2600 | } |
| 2631 | } | 2601 | } |
| @@ -2665,7 +2635,7 @@ static struct irq_chip lapic_chip __read_mostly = { | |||
| 2665 | static void lapic_register_intr(int irq) | 2635 | static void lapic_register_intr(int irq) |
| 2666 | { | 2636 | { |
| 2667 | irq_clear_status_flags(irq, IRQ_LEVEL); | 2637 | irq_clear_status_flags(irq, IRQ_LEVEL); |
| 2668 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, | 2638 | irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, |
| 2669 | "edge"); | 2639 | "edge"); |
| 2670 | } | 2640 | } |
| 2671 | 2641 | ||
| @@ -2749,7 +2719,7 @@ int timer_through_8259 __initdata; | |||
| 2749 | */ | 2719 | */ |
| 2750 | static inline void __init check_timer(void) | 2720 | static inline void __init check_timer(void) |
| 2751 | { | 2721 | { |
| 2752 | struct irq_cfg *cfg = get_irq_chip_data(0); | 2722 | struct irq_cfg *cfg = irq_get_chip_data(0); |
| 2753 | int node = cpu_to_node(0); | 2723 | int node = cpu_to_node(0); |
| 2754 | int apic1, pin1, apic2, pin2; | 2724 | int apic1, pin1, apic2, pin2; |
| 2755 | unsigned long flags; | 2725 | unsigned long flags; |
| @@ -3060,7 +3030,7 @@ unsigned int create_irq_nr(unsigned int from, int node) | |||
| 3060 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 3030 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
| 3061 | 3031 | ||
| 3062 | if (ret) { | 3032 | if (ret) { |
| 3063 | set_irq_chip_data(irq, cfg); | 3033 | irq_set_chip_data(irq, cfg); |
| 3064 | irq_clear_status_flags(irq, IRQ_NOREQUEST); | 3034 | irq_clear_status_flags(irq, IRQ_NOREQUEST); |
| 3065 | } else { | 3035 | } else { |
| 3066 | free_irq_at(irq, cfg); | 3036 | free_irq_at(irq, cfg); |
| @@ -3085,7 +3055,7 @@ int create_irq(void) | |||
| 3085 | 3055 | ||
| 3086 | void destroy_irq(unsigned int irq) | 3056 | void destroy_irq(unsigned int irq) |
| 3087 | { | 3057 | { |
| 3088 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 3058 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
| 3089 | unsigned long flags; | 3059 | unsigned long flags; |
| 3090 | 3060 | ||
| 3091 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); | 3061 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); |
| @@ -3119,7 +3089,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
| 3119 | 3089 | ||
| 3120 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); | 3090 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); |
| 3121 | 3091 | ||
| 3122 | if (irq_remapped(get_irq_chip_data(irq))) { | 3092 | if (irq_remapped(cfg)) { |
| 3123 | struct irte irte; | 3093 | struct irte irte; |
| 3124 | int ir_index; | 3094 | int ir_index; |
| 3125 | u16 sub_handle; | 3095 | u16 sub_handle; |
| @@ -3291,6 +3261,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | |||
| 3291 | 3261 | ||
| 3292 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | 3262 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) |
| 3293 | { | 3263 | { |
| 3264 | struct irq_chip *chip = &msi_chip; | ||
| 3294 | struct msi_msg msg; | 3265 | struct msi_msg msg; |
| 3295 | int ret; | 3266 | int ret; |
| 3296 | 3267 | ||
| @@ -3298,14 +3269,15 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
| 3298 | if (ret < 0) | 3269 | if (ret < 0) |
| 3299 | return ret; | 3270 | return ret; |
| 3300 | 3271 | ||
| 3301 | set_irq_msi(irq, msidesc); | 3272 | irq_set_msi_desc(irq, msidesc); |
| 3302 | write_msi_msg(irq, &msg); | 3273 | write_msi_msg(irq, &msg); |
| 3303 | 3274 | ||
| 3304 | if (irq_remapped(get_irq_chip_data(irq))) { | 3275 | if (irq_remapped(irq_get_chip_data(irq))) { |
| 3305 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3276 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
| 3306 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); | 3277 | chip = &msi_ir_chip; |
| 3307 | } else | 3278 | } |
| 3308 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 3279 | |
| 3280 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
| 3309 | 3281 | ||
| 3310 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); | 3282 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); |
| 3311 | 3283 | ||
| @@ -3423,8 +3395,8 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
| 3423 | if (ret < 0) | 3395 | if (ret < 0) |
| 3424 | return ret; | 3396 | return ret; |
| 3425 | dmar_msi_write(irq, &msg); | 3397 | dmar_msi_write(irq, &msg); |
| 3426 | set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, | 3398 | irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, |
| 3427 | "edge"); | 3399 | "edge"); |
| 3428 | return 0; | 3400 | return 0; |
| 3429 | } | 3401 | } |
| 3430 | #endif | 3402 | #endif |
| @@ -3482,6 +3454,7 @@ static struct irq_chip hpet_msi_type = { | |||
| 3482 | 3454 | ||
| 3483 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 3455 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) |
| 3484 | { | 3456 | { |
| 3457 | struct irq_chip *chip = &hpet_msi_type; | ||
| 3485 | struct msi_msg msg; | 3458 | struct msi_msg msg; |
| 3486 | int ret; | 3459 | int ret; |
| 3487 | 3460 | ||
| @@ -3501,15 +3474,12 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | |||
| 3501 | if (ret < 0) | 3474 | if (ret < 0) |
| 3502 | return ret; | 3475 | return ret; |
| 3503 | 3476 | ||
| 3504 | hpet_msi_write(get_irq_data(irq), &msg); | 3477 | hpet_msi_write(irq_get_handler_data(irq), &msg); |
| 3505 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3478 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
| 3506 | if (irq_remapped(get_irq_chip_data(irq))) | 3479 | if (irq_remapped(irq_get_chip_data(irq))) |
| 3507 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, | 3480 | chip = &ir_hpet_msi_type; |
| 3508 | handle_edge_irq, "edge"); | ||
| 3509 | else | ||
| 3510 | set_irq_chip_and_handler_name(irq, &hpet_msi_type, | ||
| 3511 | handle_edge_irq, "edge"); | ||
| 3512 | 3481 | ||
| 3482 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
| 3513 | return 0; | 3483 | return 0; |
| 3514 | } | 3484 | } |
| 3515 | #endif | 3485 | #endif |
| @@ -3596,7 +3566,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
| 3596 | 3566 | ||
| 3597 | write_ht_irq_msg(irq, &msg); | 3567 | write_ht_irq_msg(irq, &msg); |
| 3598 | 3568 | ||
| 3599 | set_irq_chip_and_handler_name(irq, &ht_irq_chip, | 3569 | irq_set_chip_and_handler_name(irq, &ht_irq_chip, |
| 3600 | handle_edge_irq, "edge"); | 3570 | handle_edge_irq, "edge"); |
| 3601 | 3571 | ||
| 3602 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); | 3572 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); |
| @@ -3605,7 +3575,40 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
| 3605 | } | 3575 | } |
| 3606 | #endif /* CONFIG_HT_IRQ */ | 3576 | #endif /* CONFIG_HT_IRQ */ |
| 3607 | 3577 | ||
| 3608 | int __init io_apic_get_redir_entries (int ioapic) | 3578 | int |
| 3579 | io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) | ||
| 3580 | { | ||
| 3581 | struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); | ||
| 3582 | int ret; | ||
| 3583 | |||
| 3584 | if (!cfg) | ||
| 3585 | return -EINVAL; | ||
| 3586 | ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); | ||
| 3587 | if (!ret) | ||
| 3588 | setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg, | ||
| 3589 | attr->trigger, attr->polarity); | ||
| 3590 | return ret; | ||
| 3591 | } | ||
| 3592 | |||
| 3593 | static int io_apic_setup_irq_pin_once(unsigned int irq, int node, | ||
| 3594 | struct io_apic_irq_attr *attr) | ||
| 3595 | { | ||
| 3596 | unsigned int id = attr->ioapic, pin = attr->ioapic_pin; | ||
| 3597 | int ret; | ||
| 3598 | |||
| 3599 | /* Avoid redundant programming */ | ||
| 3600 | if (test_bit(pin, mp_ioapic_routing[id].pin_programmed)) { | ||
| 3601 | pr_debug("Pin %d-%d already programmed\n", | ||
| 3602 | mp_ioapics[id].apicid, pin); | ||
| 3603 | return 0; | ||
| 3604 | } | ||
| 3605 | ret = io_apic_setup_irq_pin(irq, node, attr); | ||
| 3606 | if (!ret) | ||
| 3607 | set_bit(pin, mp_ioapic_routing[id].pin_programmed); | ||
| 3608 | return ret; | ||
| 3609 | } | ||
| 3610 | |||
| 3611 | static int __init io_apic_get_redir_entries(int ioapic) | ||
| 3609 | { | 3612 | { |
| 3610 | union IO_APIC_reg_01 reg_01; | 3613 | union IO_APIC_reg_01 reg_01; |
| 3611 | unsigned long flags; | 3614 | unsigned long flags; |
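io_apic_setup_irq_pin() and its _once variant replace the open-coded alloc/add-pin/setup sequences removed in the earlier hunks. A sketch of a caller inside io_apic.c, using the existing set_io_apic_irq_attr() helper; the IOAPIC, pin, and IRQ numbers are made up:

    struct io_apic_irq_attr attr;

    /* IOAPIC 0, pin 9, level-triggered (1), active-low (1) */
    set_io_apic_irq_attr(&attr, 0, 9, 1, 1);
    if (io_apic_setup_irq_pin_once(9, cpu_to_node(0), &attr))
            pr_warn("IOAPIC pin 0-9 could not be programmed\n");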
| @@ -3659,96 +3662,24 @@ int __init arch_probe_nr_irqs(void) | |||
| 3659 | } | 3662 | } |
| 3660 | #endif | 3663 | #endif |
| 3661 | 3664 | ||
| 3662 | static int __io_apic_set_pci_routing(struct device *dev, int irq, | 3665 | int io_apic_set_pci_routing(struct device *dev, int irq, |
| 3663 | struct io_apic_irq_attr *irq_attr) | 3666 | struct io_apic_irq_attr *irq_attr) |
| 3664 | { | 3667 | { |
| 3665 | struct irq_cfg *cfg; | ||
| 3666 | int node; | 3668 | int node; |
| 3667 | int ioapic, pin; | ||
| 3668 | int trigger, polarity; | ||
| 3669 | 3669 | ||
| 3670 | ioapic = irq_attr->ioapic; | ||
| 3671 | if (!IO_APIC_IRQ(irq)) { | 3670 | if (!IO_APIC_IRQ(irq)) { |
| 3672 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", | 3671 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", |
| 3673 | ioapic); | 3672 | irq_attr->ioapic); |
| 3674 | return -EINVAL; | 3673 | return -EINVAL; |
| 3675 | } | 3674 | } |
| 3676 | 3675 | ||
| 3677 | if (dev) | 3676 | node = dev ? dev_to_node(dev) : cpu_to_node(0); |
| 3678 | node = dev_to_node(dev); | ||
| 3679 | else | ||
| 3680 | node = cpu_to_node(0); | ||
| 3681 | |||
| 3682 | cfg = alloc_irq_and_cfg_at(irq, node); | ||
| 3683 | if (!cfg) | ||
| 3684 | return 0; | ||
| 3685 | |||
| 3686 | pin = irq_attr->ioapic_pin; | ||
| 3687 | trigger = irq_attr->trigger; | ||
| 3688 | polarity = irq_attr->polarity; | ||
| 3689 | 3677 | ||
| 3690 | /* | 3678 | return io_apic_setup_irq_pin_once(irq, node, irq_attr); |
| 3691 | * IRQs < 16 are already in the irq_2_pin[] map | ||
| 3692 | */ | ||
| 3693 | if (irq >= legacy_pic->nr_legacy_irqs) { | ||
| 3694 | if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { | ||
| 3695 | printk(KERN_INFO "can not add pin %d for irq %d\n", | ||
| 3696 | pin, irq); | ||
| 3697 | return 0; | ||
| 3698 | } | ||
| 3699 | } | ||
| 3700 | |||
| 3701 | setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); | ||
| 3702 | |||
| 3703 | return 0; | ||
| 3704 | } | 3679 | } |
| 3705 | 3680 | ||
| 3706 | int io_apic_set_pci_routing(struct device *dev, int irq, | ||
| 3707 | struct io_apic_irq_attr *irq_attr) | ||
| 3708 | { | ||
| 3709 | int ioapic, pin; | ||
| 3710 | /* | ||
| 3711 | * Avoid pin reprogramming. PRTs typically include entries | ||
| 3712 | * with redundant pin->gsi mappings (but unique PCI devices); | ||
| 3713 | * we only program the IOAPIC on the first. | ||
| 3714 | */ | ||
| 3715 | ioapic = irq_attr->ioapic; | ||
| 3716 | pin = irq_attr->ioapic_pin; | ||
| 3717 | if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) { | ||
| 3718 | pr_debug("Pin %d-%d already programmed\n", | ||
| 3719 | mp_ioapics[ioapic].apicid, pin); | ||
| 3720 | return 0; | ||
| 3721 | } | ||
| 3722 | set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed); | ||
| 3723 | |||
| 3724 | return __io_apic_set_pci_routing(dev, irq, irq_attr); | ||
| 3725 | } | ||
| 3726 | |||
| 3727 | u8 __init io_apic_unique_id(u8 id) | ||
| 3728 | { | ||
| 3729 | #ifdef CONFIG_X86_32 | 3681 | #ifdef CONFIG_X86_32 |
| 3730 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | 3682 | static int __init io_apic_get_unique_id(int ioapic, int apic_id) |
| 3731 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
| 3732 | return io_apic_get_unique_id(nr_ioapics, id); | ||
| 3733 | else | ||
| 3734 | return id; | ||
| 3735 | #else | ||
| 3736 | int i; | ||
| 3737 | DECLARE_BITMAP(used, 256); | ||
| 3738 | |||
| 3739 | bitmap_zero(used, 256); | ||
| 3740 | for (i = 0; i < nr_ioapics; i++) { | ||
| 3741 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
| 3742 | __set_bit(ia->apicid, used); | ||
| 3743 | } | ||
| 3744 | if (!test_bit(id, used)) | ||
| 3745 | return id; | ||
| 3746 | return find_first_zero_bit(used, 256); | ||
| 3747 | #endif | ||
| 3748 | } | ||
| 3749 | |||
| 3750 | #ifdef CONFIG_X86_32 | ||
| 3751 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | ||
| 3752 | { | 3683 | { |
| 3753 | union IO_APIC_reg_00 reg_00; | 3684 | union IO_APIC_reg_00 reg_00; |
| 3754 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; | 3685 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; |
| @@ -3821,9 +3752,33 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
| 3821 | 3752 | ||
| 3822 | return apic_id; | 3753 | return apic_id; |
| 3823 | } | 3754 | } |
| 3755 | |||
| 3756 | static u8 __init io_apic_unique_id(u8 id) | ||
| 3757 | { | ||
| 3758 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
| 3759 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
| 3760 | return io_apic_get_unique_id(nr_ioapics, id); | ||
| 3761 | else | ||
| 3762 | return id; | ||
| 3763 | } | ||
| 3764 | #else | ||
| 3765 | static u8 __init io_apic_unique_id(u8 id) | ||
| 3766 | { | ||
| 3767 | int i; | ||
| 3768 | DECLARE_BITMAP(used, 256); | ||
| 3769 | |||
| 3770 | bitmap_zero(used, 256); | ||
| 3771 | for (i = 0; i < nr_ioapics; i++) { | ||
| 3772 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
| 3773 | __set_bit(ia->apicid, used); | ||
| 3774 | } | ||
| 3775 | if (!test_bit(id, used)) | ||
| 3776 | return id; | ||
| 3777 | return find_first_zero_bit(used, 256); | ||
| 3778 | } | ||
| 3824 | #endif | 3779 | #endif |
| 3825 | 3780 | ||
| 3826 | int __init io_apic_get_version(int ioapic) | 3781 | static int __init io_apic_get_version(int ioapic) |
| 3827 | { | 3782 | { |
| 3828 | union IO_APIC_reg_01 reg_01; | 3783 | union IO_APIC_reg_01 reg_01; |
| 3829 | unsigned long flags; | 3784 | unsigned long flags; |
| @@ -3868,8 +3823,8 @@ int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) | |||
| 3868 | void __init setup_ioapic_dest(void) | 3823 | void __init setup_ioapic_dest(void) |
| 3869 | { | 3824 | { |
| 3870 | int pin, ioapic, irq, irq_entry; | 3825 | int pin, ioapic, irq, irq_entry; |
| 3871 | struct irq_desc *desc; | ||
| 3872 | const struct cpumask *mask; | 3826 | const struct cpumask *mask; |
| 3827 | struct irq_data *idata; | ||
| 3873 | 3828 | ||
| 3874 | if (skip_ioapic_setup == 1) | 3829 | if (skip_ioapic_setup == 1) |
| 3875 | return; | 3830 | return; |
| @@ -3884,21 +3839,20 @@ void __init setup_ioapic_dest(void) | |||
| 3884 | if ((ioapic > 0) && (irq > 16)) | 3839 | if ((ioapic > 0) && (irq > 16)) |
| 3885 | continue; | 3840 | continue; |
| 3886 | 3841 | ||
| 3887 | desc = irq_to_desc(irq); | 3842 | idata = irq_get_irq_data(irq); |
| 3888 | 3843 | ||
| 3889 | /* | 3844 | /* |
| 3890 | * Honour affinities which have been set in early boot | 3845 | * Honour affinities which have been set in early boot |
| 3891 | */ | 3846 | */ |
| 3892 | if (desc->status & | 3847 | if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) |
| 3893 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 3848 | mask = idata->affinity; |
| 3894 | mask = desc->irq_data.affinity; | ||
| 3895 | else | 3849 | else |
| 3896 | mask = apic->target_cpus(); | 3850 | mask = apic->target_cpus(); |
| 3897 | 3851 | ||
| 3898 | if (intr_remapping_enabled) | 3852 | if (intr_remapping_enabled) |
| 3899 | ir_ioapic_set_affinity(&desc->irq_data, mask, false); | 3853 | ir_ioapic_set_affinity(idata, mask, false); |
| 3900 | else | 3854 | else |
| 3901 | ioapic_set_affinity(&desc->irq_data, mask, false); | 3855 | ioapic_set_affinity(idata, mask, false); |
| 3902 | } | 3856 | } |
| 3903 | 3857 | ||
| 3904 | } | 3858 | } |
| @@ -4026,7 +3980,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi) | |||
| 4026 | return gsi - mp_gsi_routing[ioapic].gsi_base; | 3980 | return gsi - mp_gsi_routing[ioapic].gsi_base; |
| 4027 | } | 3981 | } |
| 4028 | 3982 | ||
| 4029 | static int bad_ioapic(unsigned long address) | 3983 | static __init int bad_ioapic(unsigned long address) |
| 4030 | { | 3984 | { |
| 4031 | if (nr_ioapics >= MAX_IO_APICS) { | 3985 | if (nr_ioapics >= MAX_IO_APICS) { |
| 4032 | printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " | 3986 |
| @@ -4086,20 +4040,16 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
| 4086 | /* Enable IOAPIC early just for system timer */ | 4040 | /* Enable IOAPIC early just for system timer */ |
| 4087 | void __init pre_init_apic_IRQ0(void) | 4041 | void __init pre_init_apic_IRQ0(void) |
| 4088 | { | 4042 | { |
| 4089 | struct irq_cfg *cfg; | 4043 | struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; |
| 4090 | 4044 | ||
| 4091 | printk(KERN_INFO "Early APIC setup for system timer0\n"); | 4045 | printk(KERN_INFO "Early APIC setup for system timer0\n"); |
| 4092 | #ifndef CONFIG_SMP | 4046 | #ifndef CONFIG_SMP |
| 4093 | physid_set_mask_of_physid(boot_cpu_physical_apicid, | 4047 | physid_set_mask_of_physid(boot_cpu_physical_apicid, |
| 4094 | &phys_cpu_present_map); | 4048 | &phys_cpu_present_map); |
| 4095 | #endif | 4049 | #endif |
| 4096 | /* Make sure the irq descriptor is set up */ | ||
| 4097 | cfg = alloc_irq_and_cfg_at(0, 0); | ||
| 4098 | |||
| 4099 | setup_local_APIC(); | 4050 | setup_local_APIC(); |
| 4100 | 4051 | ||
| 4101 | add_pin_to_irq_node(cfg, 0, 0, 0); | 4052 | io_apic_setup_irq_pin(0, 0, &attr); |
| 4102 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 4053 | irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, |
| 4103 | 4054 | "edge"); | |
| 4104 | setup_ioapic_irq(0, 0, 0, cfg, 0, 0); | ||
| 4105 | } | 4055 | } |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 4ff5968f12d2..bfe8f729e086 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
| @@ -503,7 +503,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) | |||
| 503 | if (!irq) | 503 | if (!irq) |
| 504 | return -EINVAL; | 504 | return -EINVAL; |
| 505 | 505 | ||
| 506 | set_irq_data(irq, dev); | 506 | irq_set_handler_data(irq, dev); |
| 507 | 507 | ||
| 508 | if (hpet_setup_msi_irq(irq)) | 508 | if (hpet_setup_msi_irq(irq)) |
| 509 | return -EINVAL; | 509 | return -EINVAL; |
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 20757cb2efa3..d9ca749c123b 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
| @@ -112,7 +112,7 @@ static void make_8259A_irq(unsigned int irq) | |||
| 112 | { | 112 | { |
| 113 | disable_irq_nosync(irq); | 113 | disable_irq_nosync(irq); |
| 114 | io_apic_irqs &= ~(1<<irq); | 114 | io_apic_irqs &= ~(1<<irq); |
| 115 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, | 115 | irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, |
| 116 | i8259A_chip.name); | 116 | i8259A_chip.name); |
| 117 | enable_irq(irq); | 117 | enable_irq(irq); |
| 118 | } | 118 | } |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 387b6a0c9e81..5ee693faa111 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
| @@ -44,9 +44,9 @@ void ack_bad_irq(unsigned int irq) | |||
| 44 | 44 | ||
| 45 | #define irq_stats(x) (&per_cpu(irq_stat, x)) | 45 | #define irq_stats(x) (&per_cpu(irq_stat, x)) |
| 46 | /* | 46 | /* |
| 47 | * /proc/interrupts printing: | 47 | * /proc/interrupts printing for arch specific interrupts |
| 48 | */ | 48 | */ |
| 49 | static int show_other_interrupts(struct seq_file *p, int prec) | 49 | int arch_show_interrupts(struct seq_file *p, int prec) |
| 50 | { | 50 | { |
| 51 | int j; | 51 | int j; |
| 52 | 52 | ||
| @@ -122,59 +122,6 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
| 122 | return 0; | 122 | return 0; |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | int show_interrupts(struct seq_file *p, void *v) | ||
| 126 | { | ||
| 127 | unsigned long flags, any_count = 0; | ||
| 128 | int i = *(loff_t *) v, j, prec; | ||
| 129 | struct irqaction *action; | ||
| 130 | struct irq_desc *desc; | ||
| 131 | |||
| 132 | if (i > nr_irqs) | ||
| 133 | return 0; | ||
| 134 | |||
| 135 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
| 136 | j *= 10; | ||
| 137 | |||
| 138 | if (i == nr_irqs) | ||
| 139 | return show_other_interrupts(p, prec); | ||
| 140 | |||
| 141 | /* print header */ | ||
| 142 | if (i == 0) { | ||
| 143 | seq_printf(p, "%*s", prec + 8, ""); | ||
| 144 | for_each_online_cpu(j) | ||
| 145 | seq_printf(p, "CPU%-8d", j); | ||
| 146 | seq_putc(p, '\n'); | ||
| 147 | } | ||
| 148 | |||
| 149 | desc = irq_to_desc(i); | ||
| 150 | if (!desc) | ||
| 151 | return 0; | ||
| 152 | |||
| 153 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 154 | for_each_online_cpu(j) | ||
| 155 | any_count |= kstat_irqs_cpu(i, j); | ||
| 156 | action = desc->action; | ||
| 157 | if (!action && !any_count) | ||
| 158 | goto out; | ||
| 159 | |||
| 160 | seq_printf(p, "%*d: ", prec, i); | ||
| 161 | for_each_online_cpu(j) | ||
| 162 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
| 163 | seq_printf(p, " %8s", desc->irq_data.chip->name); | ||
| 164 | seq_printf(p, "-%-8s", desc->name); | ||
| 165 | |||
| 166 | if (action) { | ||
| 167 | seq_printf(p, " %s", action->name); | ||
| 168 | while ((action = action->next) != NULL) | ||
| 169 | seq_printf(p, ", %s", action->name); | ||
| 170 | } | ||
| 171 | |||
| 172 | seq_putc(p, '\n'); | ||
| 173 | out: | ||
| 174 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 175 | return 0; | ||
| 176 | } | ||
| 177 | |||
| 178 | /* | 125 | /* |
| 179 | * /proc/stat helpers | 126 | * /proc/stat helpers |
| 180 | */ | 127 | */ |
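With GENERIC_IRQ_SHOW selected in the Kconfig hunk at the top of this diff, the per-IRQ /proc/interrupts rows come from the generic kernel/irq code, which calls back into the architecture only for the summary rows. Roughly (a paraphrase, not the verbatim generic implementation):

    int show_interrupts(struct seq_file *p, void *v)
    {
            int i = *(loff_t *) v, j, prec;

            for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                    j *= 10;

            if (i == nr_irqs) /* summary rows come from the arch hook */
                    return arch_show_interrupts(p, prec);

            /* ... generic per-IRQ rows, shared by every architecture ... */
            return 0;
    }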
| @@ -293,6 +240,7 @@ void fixup_irqs(void) | |||
| 293 | static int warned; | 240 | static int warned; |
| 294 | struct irq_desc *desc; | 241 | struct irq_desc *desc; |
| 295 | struct irq_data *data; | 242 | struct irq_data *data; |
| 243 | struct irq_chip *chip; | ||
| 296 | 244 | ||
| 297 | for_each_irq_desc(irq, desc) { | 245 | for_each_irq_desc(irq, desc) { |
| 298 | int break_affinity = 0; | 246 | int break_affinity = 0; |
| @@ -307,10 +255,10 @@ void fixup_irqs(void) | |||
| 307 | /* interrupts are disabled at this point */ | 255 |
| 308 | raw_spin_lock(&desc->lock); | 256 | raw_spin_lock(&desc->lock); |
| 309 | 257 | ||
| 310 | data = &desc->irq_data; | 258 | data = irq_desc_get_irq_data(desc); |
| 311 | affinity = data->affinity; | 259 | affinity = data->affinity; |
| 312 | if (!irq_has_action(irq) || | 260 | if (!irq_has_action(irq) || |
| 313 | cpumask_equal(affinity, cpu_online_mask)) { | 261 | cpumask_subset(affinity, cpu_online_mask)) { |
| 314 | raw_spin_unlock(&desc->lock); | 262 | raw_spin_unlock(&desc->lock); |
| 315 | continue; | 263 | continue; |
| 316 | } | 264 | } |
| @@ -327,16 +275,17 @@ void fixup_irqs(void) | |||
| 327 | affinity = cpu_all_mask; | 275 | affinity = cpu_all_mask; |
| 328 | } | 276 | } |
| 329 | 277 | ||
| 330 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) | 278 | chip = irq_data_get_irq_chip(data); |
| 331 | data->chip->irq_mask(data); | 279 | if (!irqd_can_move_in_process_context(data) && chip->irq_mask) |
| 280 | chip->irq_mask(data); | ||
| 332 | 281 | ||
| 333 | if (data->chip->irq_set_affinity) | 282 | if (chip->irq_set_affinity) |
| 334 | data->chip->irq_set_affinity(data, affinity, true); | 283 | chip->irq_set_affinity(data, affinity, true); |
| 335 | else if (!(warned++)) | 284 | else if (!(warned++)) |
| 336 | set_affinity = 0; | 285 | set_affinity = 0; |
| 337 | 286 | ||
| 338 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) | 287 | if (!irqd_can_move_in_process_context(data) && chip->irq_unmask) |
| 339 | data->chip->irq_unmask(data); | 288 | chip->irq_unmask(data); |
| 340 | 289 | ||
| 341 | raw_spin_unlock(&desc->lock); | 290 | raw_spin_unlock(&desc->lock); |
| 342 | 291 | ||
| @@ -368,10 +317,11 @@ void fixup_irqs(void) | |||
| 368 | irq = __this_cpu_read(vector_irq[vector]); | 317 | irq = __this_cpu_read(vector_irq[vector]); |
| 369 | 318 | ||
| 370 | desc = irq_to_desc(irq); | 319 | desc = irq_to_desc(irq); |
| 371 | data = &desc->irq_data; | 320 | data = irq_desc_get_irq_data(desc); |
| 321 | chip = irq_data_get_irq_chip(data); | ||
| 372 | raw_spin_lock(&desc->lock); | 322 | raw_spin_lock(&desc->lock); |
| 373 | if (data->chip->irq_retrigger) | 323 | if (chip->irq_retrigger) |
| 374 | data->chip->irq_retrigger(data); | 324 | chip->irq_retrigger(data); |
| 375 | raw_spin_unlock(&desc->lock); | 325 | raw_spin_unlock(&desc->lock); |
| 376 | } | 326 | } |
| 377 | } | 327 | } |
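Besides routing everything through the irq_data/irq_chip accessors, the first irq.c hunk quietly fixes a check: cpumask_equal() forced a rebalance whenever an IRQ's affinity was narrower than the full online mask, while cpumask_subset() leaves the IRQ alone as long as every CPU it targets is still online. The intent of the new test, written as a standalone predicate (the helper name is ours, not the kernel's):

    /* fixup is needed only if some CPU in the affinity mask went offline */
    static bool irq_needs_fixup(const struct cpumask *affinity)
    {
            return !cpumask_subset(affinity, cpu_online_mask);
    }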
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index c752e973958d..1cc302d16fb4 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
| @@ -71,6 +71,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) | |||
| 71 | static struct irqaction fpu_irq = { | 71 | static struct irqaction fpu_irq = { |
| 72 | .handler = math_error_irq, | 72 | .handler = math_error_irq, |
| 73 | .name = "fpu", | 73 | .name = "fpu", |
| 74 | .flags = IRQF_NO_THREAD, | ||
| 74 | }; | 75 | }; |
| 75 | #endif | 76 | #endif |
| 76 | 77 | ||
| @@ -80,6 +81,7 @@ static struct irqaction fpu_irq = { | |||
| 80 | static struct irqaction irq2 = { | 81 | static struct irqaction irq2 = { |
| 81 | .handler = no_action, | 82 | .handler = no_action, |
| 82 | .name = "cascade", | 83 | .name = "cascade", |
| 84 | .flags = IRQF_NO_THREAD, | ||
| 83 | }; | 85 | }; |
| 84 | 86 | ||
| 85 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | 87 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { |
| @@ -110,7 +112,7 @@ void __init init_ISA_irqs(void) | |||
| 110 | legacy_pic->init(0); | 112 | legacy_pic->init(0); |
| 111 | 113 | ||
| 112 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) | 114 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) |
| 113 | set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); | 115 | irq_set_chip_and_handler_name(i, chip, handle_level_irq, name); |
| 114 | } | 116 | } |
| 115 | 117 | ||
| 116 | void __init init_IRQ(void) | 118 | void __init init_IRQ(void) |
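The cascade and FPU-error actions gain IRQF_NO_THREAD because they must not be deferred to a thread under threadirqs: the cascade line's no_action handler does nothing and exists only to keep the slave PIC wired up. A statically installed action follows the same pattern (sketch; IRQ 2 is the classic i8259 cascade line):

    static struct irqaction demo_cascade = {
            .handler = no_action, /* nothing to run, so never thread it */
            .name    = "cascade",
            .flags   = IRQF_NO_THREAD,
    };

    /* installed early in boot, before request_irq() is available:
     * setup_irq(2, &demo_cascade); */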
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e2865df242bc..16ce42613991 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
| @@ -64,6 +64,7 @@ | |||
| 64 | #include <asm/mtrr.h> | 64 | #include <asm/mtrr.h> |
| 65 | #include <asm/mwait.h> | 65 | #include <asm/mwait.h> |
| 66 | #include <asm/apic.h> | 66 | #include <asm/apic.h> |
| 67 | #include <asm/io_apic.h> | ||
| 67 | #include <asm/setup.h> | 68 | #include <asm/setup.h> |
| 68 | #include <asm/uv/uv.h> | 69 | #include <asm/uv/uv.h> |
| 69 | #include <linux/mc146818rtc.h> | 70 | #include <linux/mc146818rtc.h> |
| @@ -927,6 +928,14 @@ int __cpuinit native_cpu_up(unsigned int cpu) | |||
| 927 | return 0; | 928 | return 0; |
| 928 | } | 929 | } |
| 929 | 930 | ||
| 931 | /** | ||
| 932 | * arch_disable_smp_support() - disables SMP support for x86 at runtime | ||
| 933 | */ | ||
| 934 | void arch_disable_smp_support(void) | ||
| 935 | { | ||
| 936 | disable_ioapic_support(); | ||
| 937 | } | ||
| 938 | |||
| 930 | /* | 939 | /* |
| 931 | * Fall back to non SMP mode after errors. | 940 | * Fall back to non SMP mode after errors. |
| 932 | * | 941 | * |
| @@ -1027,7 +1036,7 @@ static int __init smp_sanity_check(unsigned max_cpus) | |||
| 1027 | "(tell your hw vendor)\n"); | 1036 | "(tell your hw vendor)\n"); |
| 1028 | } | 1037 | } |
| 1029 | smpboot_clear_io_apic(); | 1038 | smpboot_clear_io_apic(); |
| 1030 | arch_disable_smp_support(); | 1039 | disable_ioapic_support(); |
| 1031 | return -1; | 1040 | return -1; |
| 1032 | } | 1041 | } |
| 1033 | 1042 | ||
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index eba687f0cc0c..b9ec1c74943c 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
| @@ -847,7 +847,7 @@ static void __init lguest_init_IRQ(void) | |||
| 847 | void lguest_setup_irq(unsigned int irq) | 847 | void lguest_setup_irq(unsigned int irq) |
| 848 | { | 848 | { |
| 849 | irq_alloc_desc_at(irq, 0); | 849 | irq_alloc_desc_at(irq, 0); |
| 850 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, | 850 | irq_set_chip_and_handler_name(irq, &lguest_irq_controller, |
| 851 | handle_level_irq, "level"); | 851 | handle_level_irq, "level"); |
| 852 | } | 852 | } |
| 853 | 853 | ||
| @@ -995,7 +995,7 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) | |||
| 995 | static void lguest_time_init(void) | 995 | static void lguest_time_init(void) |
| 996 | { | 996 | { |
| 997 | /* Set up the timer interrupt (0) to go to our simple timer routine */ | 997 | /* Set up the timer interrupt (0) to go to our simple timer routine */ |
| 998 | set_irq_handler(0, lguest_time_irq); | 998 | irq_set_handler(0, lguest_time_irq); |
| 999 | 999 | ||
| 1000 | clocksource_register(&lguest_clock); | 1000 | clocksource_register(&lguest_clock); |
| 1001 | 1001 | ||
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index 7b24460917d5..374a05d8ad22 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c | |||
| @@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
| 131 | unsigned long mmr_offset, int limit) | 131 | unsigned long mmr_offset, int limit) |
| 132 | { | 132 | { |
| 133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | 133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
| 134 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 134 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
| 135 | unsigned long mmr_value; | 135 | unsigned long mmr_value; |
| 136 | struct uv_IO_APIC_route_entry *entry; | 136 | struct uv_IO_APIC_route_entry *entry; |
| 137 | int mmr_pnode, err; | 137 | int mmr_pnode, err; |
| @@ -148,7 +148,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
| 148 | else | 148 | else |
| 149 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 149 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
| 150 | 150 | ||
| 151 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | 151 | irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, |
| 152 | irq_name); | 152 | irq_name); |
| 153 | 153 | ||
| 154 | mmr_value = 0; | 154 | mmr_value = 0; |
diff --git a/arch/x86/platform/visws/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c index 632037671746..fe4cf8294878 100644 --- a/arch/x86/platform/visws/visws_quirks.c +++ b/arch/x86/platform/visws/visws_quirks.c | |||
| @@ -569,11 +569,13 @@ out_unlock: | |||
| 569 | static struct irqaction master_action = { | 569 | static struct irqaction master_action = { |
| 570 | .handler = piix4_master_intr, | 570 | .handler = piix4_master_intr, |
| 571 | .name = "PIIX4-8259", | 571 | .name = "PIIX4-8259", |
| 572 | .flags = IRQF_NO_THREAD, | ||
| 572 | }; | 573 | }; |
| 573 | 574 | ||
| 574 | static struct irqaction cascade_action = { | 575 | static struct irqaction cascade_action = { |
| 575 | .handler = no_action, | 576 | .handler = no_action, |
| 576 | .name = "cascade", | 577 | .name = "cascade", |
| 578 | .flags = IRQF_NO_THREAD, | ||
| 577 | }; | 579 | }; |
| 578 | 580 | ||
| 579 | static inline void set_piix4_virtual_irq_type(void) | 581 | static inline void set_piix4_virtual_irq_type(void) |
| @@ -606,7 +608,7 @@ static void __init visws_pre_intr_init(void) | |||
| 606 | chip = &cobalt_irq_type; | 608 | chip = &cobalt_irq_type; |
| 607 | 609 | ||
| 608 | if (chip) | 610 | if (chip) |
| 609 | set_irq_chip(i, chip); | 611 | irq_set_chip(i, chip); |
| 610 | } | 612 | } |
| 611 | 613 | ||
| 612 | setup_irq(CO_IRQ_8259, &master_action); | 614 | setup_irq(CO_IRQ_8259, &master_action); |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 149fa875e396..0ad1699a1b3e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
| @@ -674,7 +674,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
| 674 | 674 | ||
| 675 | irq_info[irq] = mk_pirq_info(0, pirq, 0, vector); | 675 | irq_info[irq] = mk_pirq_info(0, pirq, 0, vector); |
| 676 | pirq_to_irq[pirq] = irq; | 676 | pirq_to_irq[pirq] = irq; |
| 677 | ret = set_irq_msi(irq, msidesc); | 677 | ret = irq_set_msi_desc(irq, msidesc); |
| 678 | if (ret < 0) | 678 | if (ret < 0) |
| 679 | goto error_irq; | 679 | goto error_irq; |
| 680 | out: | 680 | out: |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 2eb16e03422f..59b72ca1c5d1 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
| 15 | #include <linux/percpu.h> | 15 | #include <linux/percpu.h> |
| 16 | #include <linux/hrtimer.h> | 16 | #include <linux/hrtimer.h> |
| 17 | #include <linux/kref.h> | ||
| 18 | #include <linux/workqueue.h> | ||
| 17 | 19 | ||
| 18 | #include <asm/atomic.h> | 20 | #include <asm/atomic.h> |
| 19 | #include <asm/ptrace.h> | 21 | #include <asm/ptrace.h> |
| @@ -56,6 +58,7 @@ | |||
| 56 | * irq line disabled until the threaded handler has been run. | 58 | * irq line disabled until the threaded handler has been run. |
| 57 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend | 59 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend |
| 58 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set | 60 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set |
| 61 | * IRQF_NO_THREAD - Interrupt cannot be threaded | ||
| 59 | */ | 62 | */ |
| 60 | #define IRQF_DISABLED 0x00000020 | 63 | #define IRQF_DISABLED 0x00000020 |
| 61 | #define IRQF_SAMPLE_RANDOM 0x00000040 | 64 | #define IRQF_SAMPLE_RANDOM 0x00000040 |
| @@ -68,22 +71,9 @@ | |||
| 68 | #define IRQF_ONESHOT 0x00002000 | 71 | #define IRQF_ONESHOT 0x00002000 |
| 69 | #define IRQF_NO_SUSPEND 0x00004000 | 72 | #define IRQF_NO_SUSPEND 0x00004000 |
| 70 | #define IRQF_FORCE_RESUME 0x00008000 | 73 | #define IRQF_FORCE_RESUME 0x00008000 |
| 74 | #define IRQF_NO_THREAD 0x00010000 | ||
| 71 | 75 | ||
| 72 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) | 76 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) |
| 73 | |||
| 74 | /* | ||
| 75 | * Bits used by threaded handlers: | ||
| 76 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | ||
| 77 | * IRQTF_DIED - handler thread died | ||
| 78 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed | ||
| 79 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity | ||
| 80 | */ | ||
| 81 | enum { | ||
| 82 | IRQTF_RUNTHREAD, | ||
| 83 | IRQTF_DIED, | ||
| 84 | IRQTF_WARNED, | ||
| 85 | IRQTF_AFFINITY, | ||
| 86 | }; | ||
| 87 | 77 | ||
| 88 | /* | 78 | /* |
| 89 | * These values can be returned by request_any_context_irq() and | 79 | * These values can be returned by request_any_context_irq() and |
| @@ -111,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); | |||
| 111 | * @thread_fn: interrupt handler function for threaded interrupts | 101 | * @thread_fn: interrupt handler function for threaded interrupts |
| 112 | * @thread: thread pointer for threaded interrupts | 102 | * @thread: thread pointer for threaded interrupts |
| 113 | * @thread_flags: flags related to @thread | 103 | * @thread_flags: flags related to @thread |
| 104 | * @thread_mask: bitmask for keeping track of @thread activity | ||
| 114 | */ | 105 | */ |
| 115 | struct irqaction { | 106 | struct irqaction { |
| 116 | irq_handler_t handler; | 107 | irq_handler_t handler; |
| @@ -121,6 +112,7 @@ struct irqaction { | |||
| 121 | irq_handler_t thread_fn; | 112 | irq_handler_t thread_fn; |
| 122 | struct task_struct *thread; | 113 | struct task_struct *thread; |
| 123 | unsigned long thread_flags; | 114 | unsigned long thread_flags; |
| 115 | unsigned long thread_mask; | ||
| 124 | const char *name; | 116 | const char *name; |
| 125 | struct proc_dir_entry *dir; | 117 | struct proc_dir_entry *dir; |
| 126 | } ____cacheline_internodealigned_in_smp; | 118 | } ____cacheline_internodealigned_in_smp; |
| @@ -241,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq); | |||
| 241 | extern int irq_select_affinity(unsigned int irq); | 233 | extern int irq_select_affinity(unsigned int irq); |
| 242 | 234 | ||
| 243 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); | 235 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); |
| 236 | |||
| 237 | /** | ||
| 238 | * struct irq_affinity_notify - context for notification of IRQ affinity changes | ||
| 239 | * @irq: Interrupt to which notification applies | ||
| 240 | * @kref: Reference count, for internal use | ||
| 241 | * @work: Work item, for internal use | ||
| 242 | * @notify: Function to be called on change. This will be | ||
| 243 | * called in process context. | ||
| 244 | * @release: Function to be called on release. This will be | ||
| 245 | * called in process context. Once registered, the | ||
| 246 | * structure must only be freed when this function is | ||
| 247 | * called or later. | ||
| 248 | */ | ||
| 249 | struct irq_affinity_notify { | ||
| 250 | unsigned int irq; | ||
| 251 | struct kref kref; | ||
| 252 | struct work_struct work; | ||
| 253 | void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); | ||
| 254 | void (*release)(struct kref *ref); | ||
| 255 | }; | ||
| 256 | |||
| 257 | extern int | ||
| 258 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | ||
| 259 | |||
| 260 | static inline void irq_run_affinity_notifiers(void) | ||
| 261 | { | ||
| 262 | flush_scheduled_work(); | ||
| 263 | } | ||
| 264 | |||
| 244 | #else /* CONFIG_SMP */ | 265 | #else /* CONFIG_SMP */ |
| 245 | 266 | ||
| 246 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) | 267 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) |
| @@ -256,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq) | |||
| 256 | static inline int irq_select_affinity(unsigned int irq) { return 0; } | 277 | static inline int irq_select_affinity(unsigned int irq) { return 0; } |
| 257 | 278 | ||
| 258 | static inline int irq_set_affinity_hint(unsigned int irq, | 279 | static inline int irq_set_affinity_hint(unsigned int irq, |
| 259 | const struct cpumask *m) | 280 | const struct cpumask *m) |
| 260 | { | 281 | { |
| 261 | return -EINVAL; | 282 | return -EINVAL; |
| 262 | } | 283 | } |
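The new notifier lets a consumer (e.g. a multiqueue network driver) re-steer its per-queue resources when the interrupt's affinity changes. A hedged usage sketch (my_notify/my_release are illustrative; irq_set_affinity_notifier is expected to initialize the kref and work members itself):

    #include <linux/interrupt.h>

    static void my_notify(struct irq_affinity_notify *notify,
                          const cpumask_t *mask)
    {
            /* process context: re-steer per-queue resources to 'mask' */
    }

    static void my_release(struct kref *ref)
    {
            struct irq_affinity_notify *notify =
                    container_of(ref, struct irq_affinity_notify, kref);
            /* last reference gone: 'notify' may be freed from here on */
    }

    static struct irq_affinity_notify my_affinity_notify = {
            .notify  = my_notify,
            .release = my_release,
    };

    static int my_setup(unsigned int irq)
    {
            /* register for 'irq'; unregister later by passing NULL */
            return irq_set_affinity_notifier(irq, &my_affinity_notify);
    }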
| @@ -315,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long | |||
| 315 | } | 336 | } |
| 316 | 337 | ||
| 317 | /* IRQ wakeup (PM) control: */ | 338 | /* IRQ wakeup (PM) control: */ |
| 318 | extern int set_irq_wake(unsigned int irq, unsigned int on); | 339 | extern int irq_set_irq_wake(unsigned int irq, unsigned int on); |
| 340 | |||
| 341 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 342 | /* Please do not use: Use the replacement functions instead */ | ||
| 343 | static inline int set_irq_wake(unsigned int irq, unsigned int on) | ||
| 344 | { | ||
| 345 | return irq_set_irq_wake(irq, on); | ||
| 346 | } | ||
| 347 | #endif | ||
| 319 | 348 | ||
| 320 | static inline int enable_irq_wake(unsigned int irq) | 349 | static inline int enable_irq_wake(unsigned int irq) |
| 321 | { | 350 | { |
| 322 | return set_irq_wake(irq, 1); | 351 | return irq_set_irq_wake(irq, 1); |
| 323 | } | 352 | } |
| 324 | 353 | ||
| 325 | static inline int disable_irq_wake(unsigned int irq) | 354 | static inline int disable_irq_wake(unsigned int irq) |
| 326 | { | 355 | { |
| 327 | return set_irq_wake(irq, 0); | 356 | return irq_set_irq_wake(irq, 0); |
| 328 | } | 357 | } |
| 329 | 358 | ||
| 330 | #else /* !CONFIG_GENERIC_HARDIRQS */ | 359 | #else /* !CONFIG_GENERIC_HARDIRQS */ |
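Driver-facing behaviour is unchanged here; only the exported symbol gains the irq_ prefix. A typical suspend/resume pair still reads as below (my_irq stands in for a real driver's interrupt number):

    static int my_suspend(struct device *dev)
    {
            /* keep the line armed as a wakeup source;
             * expands to irq_set_irq_wake(my_irq, 1) */
            return enable_irq_wake(my_irq);
    }

    static int my_resume(struct device *dev)
    {
            return disable_irq_wake(my_irq); /* irq_set_irq_wake(my_irq, 0) */
    }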
| @@ -354,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq) | |||
| 354 | } | 383 | } |
| 355 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 384 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
| 356 | 385 | ||
| 386 | |||
| 387 | #ifdef CONFIG_IRQ_FORCED_THREADING | ||
| 388 | extern bool force_irqthreads; | ||
| 389 | #else | ||
| 390 | #define force_irqthreads (0) | ||
| 391 | #endif | ||
| 392 | |||
| 357 | #ifndef __ARCH_SET_SOFTIRQ_PENDING | 393 | #ifndef __ARCH_SET_SOFTIRQ_PENDING |
| 358 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) | 394 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) |
| 359 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) | 395 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) |
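force_irqthreads is switched on by the new "threadirqs" command line parameter. Conceptually, the core then rewrites each newly installed action along these lines (a simplified paraphrase of the setup logic in kernel/irq/manage.c, not the verbatim code):

    #include <linux/interrupt.h>

    static irqreturn_t wake_thread_stub(int irq, void *dev_id)
    {
            return IRQ_WAKE_THREAD;
    }

    static void setup_forced_threading_sketch(struct irqaction *new)
    {
            if (!force_irqthreads)
                    return;
            /* explicitly exempted or already special handlers stay put */
            if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                    return;

            new->flags |= IRQF_ONESHOT;
            if (!new->thread_fn) {
                    /* demote the primary handler to the irq thread and
                     * run only a wake-up stub in hard irq context */
                    new->thread_fn = new->handler;
                    new->handler = wake_thread_stub;
            }
    }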
| @@ -653,6 +689,7 @@ static inline void init_irq_proc(void) | |||
| 653 | 689 | ||
| 654 | struct seq_file; | 690 | struct seq_file; |
| 655 | int show_interrupts(struct seq_file *p, void *v); | 691 | int show_interrupts(struct seq_file *p, void *v); |
| 692 | int arch_show_interrupts(struct seq_file *p, int prec); | ||
| 656 | 693 | ||
| 657 | extern int early_irq_init(void); | 694 | extern int early_irq_init(void); |
| 658 | extern int arch_probe_nr_irqs(void); | 695 | extern int arch_probe_nr_irqs(void); |
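arch_show_interrupts() pairs with the new GENERIC_IRQ_SHOW switch: the generic show_interrupts() renders the per-IRQ rows of /proc/interrupts and calls back into the architecture for its special counters. A hypothetical implementation (arch_nmi_count is a made-up per-cpu counter, not a real symbol):

    #include <linux/interrupt.h>
    #include <linux/seq_file.h>

    static DEFINE_PER_CPU(unsigned int, arch_nmi_count); /* hypothetical */

    int arch_show_interrupts(struct seq_file *p, int prec)
    {
            int cpu;

            /* 'prec' right-aligns the label with the generic rows */
            seq_printf(p, "%*s: ", prec, "NMI");
            for_each_online_cpu(cpu)
                    seq_printf(p, "%10u ", per_cpu(arch_nmi_count, cpu));
            seq_printf(p, "  Non-maskable interrupts\n");
            return 0;
    }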
diff --git a/include/linux/irq.h b/include/linux/irq.h index 80fcb53057bc..1d3577f30d45 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -29,61 +29,104 @@ | |||
| 29 | #include <asm/irq_regs.h> | 29 | #include <asm/irq_regs.h> |
| 30 | 30 | ||
| 31 | struct irq_desc; | 31 | struct irq_desc; |
| 32 | struct irq_data; | ||
| 32 | typedef void (*irq_flow_handler_t)(unsigned int irq, | 33 | typedef void (*irq_flow_handler_t)(unsigned int irq, |
| 33 | struct irq_desc *desc); | 34 | struct irq_desc *desc); |
| 34 | 35 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | |
| 35 | 36 | ||
| 36 | /* | 37 | /* |
| 37 | * IRQ line status. | 38 | * IRQ line status. |
| 38 | * | 39 | * |
| 39 | * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h | 40 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h |
| 41 | * | ||
| 42 | * IRQ_TYPE_NONE - default, unspecified type | ||
| 43 | * IRQ_TYPE_EDGE_RISING - rising edge triggered | ||
| 44 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered | ||
| 45 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered | ||
| 46 | * IRQ_TYPE_LEVEL_HIGH - high level triggered | ||
| 47 | * IRQ_TYPE_LEVEL_LOW - low level triggered | ||
| 48 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits | ||
| 49 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits | ||
| 50 | * IRQ_TYPE_PROBE - Special flag for probing in progress | ||
| 51 | * | ||
| 52 | * Bits which can be modified via irq_set/clear/modify_status_flags() | ||
| 53 | * IRQ_LEVEL - Interrupt is level type. Will be also | ||
| 54 | * updated in the code when the above trigger | ||
| 55 | * bits are modified via set_irq_type() | ||
| 56 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect | ||
| 57 | * it from affinity setting | ||
| 58 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing | ||
| 59 | * IRQ_NOREQUEST - Interrupt cannot be requested via | ||
| 60 | * request_irq() | ||
| 61 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in | ||
| 62 | * request/setup_irq() | ||
| 63 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | ||
| 64 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context | ||
| 65 | * IRQ_NESTED_THREAD - Interrupt nests into another thread | ||
| 66 | * | ||
| 67 | * Deprecated bits. They are kept updated as long as | ||
| 68 | * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits | ||
| 69 | * are internal state of the core code and if you really need to access | ||
| 70 | * them, then talk to the genirq maintainer instead of hacking | ||
| 71 | * something weird. | ||
| 40 | * | 72 | * |
| 41 | * IRQ types | ||
| 42 | */ | 73 | */ |
| 43 | #define IRQ_TYPE_NONE 0x00000000 /* Default, unspecified type */ | 74 | enum { |
| 44 | #define IRQ_TYPE_EDGE_RISING 0x00000001 /* Edge rising type */ | 75 | IRQ_TYPE_NONE = 0x00000000, |
| 45 | #define IRQ_TYPE_EDGE_FALLING 0x00000002 /* Edge falling type */ | 76 | IRQ_TYPE_EDGE_RISING = 0x00000001, |
| 46 | #define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) | 77 | IRQ_TYPE_EDGE_FALLING = 0x00000002, |
| 47 | #define IRQ_TYPE_LEVEL_HIGH 0x00000004 /* Level high type */ | 78 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), |
| 48 | #define IRQ_TYPE_LEVEL_LOW 0x00000008 /* Level low type */ | 79 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, |
| 49 | #define IRQ_TYPE_SENSE_MASK 0x0000000f /* Mask of the above */ | 80 | IRQ_TYPE_LEVEL_LOW = 0x00000008, |
| 50 | #define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */ | 81 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), |
| 51 | 82 | IRQ_TYPE_SENSE_MASK = 0x0000000f, | |
| 52 | /* Internal flags */ | 83 | |
| 53 | #define IRQ_INPROGRESS 0x00000100 /* IRQ handler active - do not enter! */ | 84 | IRQ_TYPE_PROBE = 0x00000010, |
| 54 | #define IRQ_DISABLED 0x00000200 /* IRQ disabled - do not enter! */ | 85 | |
| 55 | #define IRQ_PENDING 0x00000400 /* IRQ pending - replay on enable */ | 86 | IRQ_LEVEL = (1 << 8), |
| 56 | #define IRQ_REPLAY 0x00000800 /* IRQ has been replayed but not acked yet */ | 87 | IRQ_PER_CPU = (1 << 9), |
| 57 | #define IRQ_AUTODETECT 0x00001000 /* IRQ is being autodetected */ | 88 | IRQ_NOPROBE = (1 << 10), |
| 58 | #define IRQ_WAITING 0x00002000 /* IRQ not yet seen - for autodetection */ | 89 | IRQ_NOREQUEST = (1 << 11), |
| 59 | #define IRQ_LEVEL 0x00004000 /* IRQ level triggered */ | 90 | IRQ_NOAUTOEN = (1 << 12), |
| 60 | #define IRQ_MASKED 0x00008000 /* IRQ masked - shouldn't be seen again */ | 91 | IRQ_NO_BALANCING = (1 << 13), |
| 61 | #define IRQ_PER_CPU 0x00010000 /* IRQ is per CPU */ | 92 | IRQ_MOVE_PCNTXT = (1 << 14), |
| 62 | #define IRQ_NOPROBE 0x00020000 /* IRQ is not valid for probing */ | 93 | IRQ_NESTED_THREAD = (1 << 15), |
| 63 | #define IRQ_NOREQUEST 0x00040000 /* IRQ cannot be requested */ | 94 | |
| 64 | #define IRQ_NOAUTOEN 0x00080000 /* IRQ will not be enabled on request irq */ | 95 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT |
| 65 | #define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */ | 96 | IRQ_INPROGRESS = (1 << 16), |
| 66 | #define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ | 97 | IRQ_REPLAY = (1 << 17), |
| 67 | #define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ | 98 | IRQ_WAITING = (1 << 18), |
| 68 | #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ | 99 | IRQ_DISABLED = (1 << 19), |
| 69 | #define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ | 100 | IRQ_PENDING = (1 << 20), |
| 70 | #define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ | 101 | IRQ_MASKED = (1 << 21), |
| 71 | #define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ | 102 | IRQ_MOVE_PENDING = (1 << 22), |
| 72 | #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ | 103 | IRQ_AFFINITY_SET = (1 << 23), |
| 73 | #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ | 104 | IRQ_WAKEUP = (1 << 24), |
| 105 | #endif | ||
| 106 | }; | ||
| 74 | 107 | ||
| 75 | #define IRQF_MODIFY_MASK \ | 108 | #define IRQF_MODIFY_MASK \ |
| 76 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | 109 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
| 77 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ | 110 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
| 78 | IRQ_PER_CPU) | 111 | IRQ_PER_CPU | IRQ_NESTED_THREAD) |
| 79 | 112 | ||
| 80 | #ifdef CONFIG_IRQ_PER_CPU | 113 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
| 81 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | 114 | |
| 82 | # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 115 | static inline __deprecated bool CHECK_IRQ_PER_CPU(unsigned int status) |
| 83 | #else | 116 | { |
| 84 | # define CHECK_IRQ_PER_CPU(var) 0 | 117 | return status & IRQ_PER_CPU; |
| 85 | # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING | 118 | } |
| 86 | #endif | 119 | |
| 120 | /* | ||
| 121 | * Return value for chip->irq_set_affinity() | ||
| 122 | * | ||
| 123 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity | ||
| 124 | * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity | ||
| 125 | */ | ||
| 126 | enum { | ||
| 127 | IRQ_SET_MASK_OK = 0, | ||
| 128 | IRQ_SET_MASK_OK_NOCOPY, | ||
| 129 | }; | ||
| 87 | 130 | ||
| 88 | struct msi_desc; | 131 | struct msi_desc; |
| 89 | 132 | ||
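These return values define the contract for chip->irq_set_affinity(): return IRQ_SET_MASK_OK and the core copies the mask into irq_data.affinity; return IRQ_SET_MASK_OK_NOCOPY when the chip already updated (a possibly modified) mask itself. A hypothetical callback:

    #include <linux/irq.h>

    static int my_chip_set_affinity(struct irq_data *data,
                                    const struct cpumask *mask, bool force)
    {
            /* program the interrupt steering hardware for 'mask' ... */

            /* core copies 'mask' to data->affinity on this return value */
            return IRQ_SET_MASK_OK;
    }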
| @@ -91,6 +134,8 @@ struct msi_desc; | |||
| 91 | * struct irq_data - per irq and irq chip data passed down to chip functions | 134 | * struct irq_data - per irq and irq chip data passed down to chip functions |
| 92 | * @irq: interrupt number | 135 | * @irq: interrupt number |
| 93 | * @node: node index useful for balancing | 136 | * @node: node index useful for balancing |
| 137 | * @state_use_accessor: status information for irq chip functions. | ||
| 138 | * Use accessor functions to deal with it | ||
| 94 | * @chip: low level interrupt hardware access | 139 | * @chip: low level interrupt hardware access |
| 95 | * @handler_data: per-IRQ data for the irq_chip methods | 140 | * @handler_data: per-IRQ data for the irq_chip methods |
| 96 | * @chip_data: platform-specific per-chip private data for the chip | 141 | * @chip_data: platform-specific per-chip private data for the chip |
| @@ -105,6 +150,7 @@ struct msi_desc; | |||
| 105 | struct irq_data { | 150 | struct irq_data { |
| 106 | unsigned int irq; | 151 | unsigned int irq; |
| 107 | unsigned int node; | 152 | unsigned int node; |
| 153 | unsigned int state_use_accessors; | ||
| 108 | struct irq_chip *chip; | 154 | struct irq_chip *chip; |
| 109 | void *handler_data; | 155 | void *handler_data; |
| 110 | void *chip_data; | 156 | void *chip_data; |
| @@ -114,6 +160,80 @@ struct irq_data { | |||
| 114 | #endif | 160 | #endif |
| 115 | }; | 161 | }; |
| 116 | 162 | ||
| 163 | /* | ||
| 164 | * Bit masks for irq_data.state | ||
| 165 | * | ||
| 166 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits | ||
| 167 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending | ||
| 168 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ | ||
| 169 | * IRQD_PER_CPU - Interrupt is per cpu | ||
| 170 | * IRQD_AFFINITY_SET - Interrupt affinity was set | ||
| 171 | * IRQD_LEVEL - Interrupt is level triggered | ||
| 172 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup | ||
| 173 | * from suspend | ||
| 174 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process | ||
| 175 | * context | ||
| 176 | */ | ||
| 177 | enum { | ||
| 178 | IRQD_TRIGGER_MASK = 0xf, | ||
| 179 | IRQD_SETAFFINITY_PENDING = (1 << 8), | ||
| 180 | IRQD_NO_BALANCING = (1 << 10), | ||
| 181 | IRQD_PER_CPU = (1 << 11), | ||
| 182 | IRQD_AFFINITY_SET = (1 << 12), | ||
| 183 | IRQD_LEVEL = (1 << 13), | ||
| 184 | IRQD_WAKEUP_STATE = (1 << 14), | ||
| 185 | IRQD_MOVE_PCNTXT = (1 << 15), | ||
| 186 | }; | ||
| 187 | |||
| 188 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) | ||
| 189 | { | ||
| 190 | return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; | ||
| 191 | } | ||
| 192 | |||
| 193 | static inline bool irqd_is_per_cpu(struct irq_data *d) | ||
| 194 | { | ||
| 195 | return d->state_use_accessors & IRQD_PER_CPU; | ||
| 196 | } | ||
| 197 | |||
| 198 | static inline bool irqd_can_balance(struct irq_data *d) | ||
| 199 | { | ||
| 200 | return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); | ||
| 201 | } | ||
| 202 | |||
| 203 | static inline bool irqd_affinity_was_set(struct irq_data *d) | ||
| 204 | { | ||
| 205 | return d->state_use_accessors & IRQD_AFFINITY_SET; | ||
| 206 | } | ||
| 207 | |||
| 208 | static inline u32 irqd_get_trigger_type(struct irq_data *d) | ||
| 209 | { | ||
| 210 | return d->state_use_accessors & IRQD_TRIGGER_MASK; | ||
| 211 | } | ||
| 212 | |||
| 213 | /* | ||
| 214 | * Must only be called inside irq_chip.irq_set_type() functions. | ||
| 215 | */ | ||
| 216 | static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) | ||
| 217 | { | ||
| 218 | d->state_use_accessors &= ~IRQD_TRIGGER_MASK; | ||
| 219 | d->state_use_accessors |= type & IRQD_TRIGGER_MASK; | ||
| 220 | } | ||
| 221 | |||
| 222 | static inline bool irqd_is_level_type(struct irq_data *d) | ||
| 223 | { | ||
| 224 | return d->state_use_accessors & IRQD_LEVEL; | ||
| 225 | } | ||
| 226 | |||
| 227 | static inline bool irqd_is_wakeup_set(struct irq_data *d) | ||
| 228 | { | ||
| 229 | return d->state_use_accessors & IRQD_WAKEUP_STATE; | ||
| 230 | } | ||
| 231 | |||
| 232 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) | ||
| 233 | { | ||
| 234 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; | ||
| 235 | } | ||
| 236 | |||
| 117 | /** | 237 | /** |
| 118 | * struct irq_chip - hardware interrupt chip descriptor | 238 | * struct irq_chip - hardware interrupt chip descriptor |
| 119 | * | 239 | * |
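Chip implementations are expected to reach this state only through the accessors above. For instance, a hypothetical irq_set_type() callback (the only place where irqd_set_trigger_type() is legal):

    #include <linux/irq.h>

    static int my_chip_set_type(struct irq_data *data, unsigned int type)
    {
            /* record the trigger bits; only legal inside irq_set_type() */
            irqd_set_trigger_type(data, type);

            if (type & IRQ_TYPE_LEVEL_MASK) {
                    /* program the hardware for level triggering */
            } else {
                    /* program the hardware for edge triggering */
            }
            return 0;
    }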
| @@ -150,6 +270,7 @@ struct irq_data { | |||
| 150 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | 270 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ |
| 151 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | 271 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips |
| 152 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | 272 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
| 273 | * @flags: chip specific flags | ||
| 153 | * | 274 | * |
| 154 | * @release: release function solely used by UML | 275 | * @release: release function solely used by UML |
| 155 | */ | 276 | */ |
| @@ -196,12 +317,27 @@ struct irq_chip { | |||
| 196 | void (*irq_bus_lock)(struct irq_data *data); | 317 | void (*irq_bus_lock)(struct irq_data *data); |
| 197 | void (*irq_bus_sync_unlock)(struct irq_data *data); | 318 | void (*irq_bus_sync_unlock)(struct irq_data *data); |
| 198 | 319 | ||
| 320 | unsigned long flags; | ||
| 321 | |||
| 199 | /* Currently used only by UML, might disappear one day.*/ | 322 | /* Currently used only by UML, might disappear one day.*/ |
| 200 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 323 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
| 201 | void (*release)(unsigned int irq, void *dev_id); | 324 | void (*release)(unsigned int irq, void *dev_id); |
| 202 | #endif | 325 | #endif |
| 203 | }; | 326 | }; |
| 204 | 327 | ||
| 328 | /* | ||
| 329 | * irq_chip specific flags | ||
| 330 | * | ||
| 331 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() | ||
| 332 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled | ||
| 333 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path | ||
| 334 | */ | ||
| 335 | enum { | ||
| 336 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), | ||
| 337 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), | ||
| 338 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), | ||
| 339 | }; | ||
| 340 | |||
| 205 | /* This include will go away once we isolated irq_desc usage to core code */ | 341 | /* This include will go away once we isolated irq_desc usage to core code */ |
| 206 | #include <linux/irqdesc.h> | 342 | #include <linux/irqdesc.h> |
| 207 | 343 | ||
| @@ -218,7 +354,7 @@ struct irq_chip { | |||
| 218 | # define ARCH_IRQ_INIT_FLAGS 0 | 354 | # define ARCH_IRQ_INIT_FLAGS 0 |
| 219 | #endif | 355 | #endif |
| 220 | 356 | ||
| 221 | #define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) | 357 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS |
| 222 | 358 | ||
| 223 | struct irqaction; | 359 | struct irqaction; |
| 224 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 360 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
| @@ -229,9 +365,13 @@ extern void remove_irq(unsigned int irq, struct irqaction *act); | |||
| 229 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) | 365 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
| 230 | void move_native_irq(int irq); | 366 | void move_native_irq(int irq); |
| 231 | void move_masked_irq(int irq); | 367 | void move_masked_irq(int irq); |
| 368 | void irq_move_irq(struct irq_data *data); | ||
| 369 | void irq_move_masked_irq(struct irq_data *data); | ||
| 232 | #else | 370 | #else |
| 233 | static inline void move_native_irq(int irq) { } | 371 | static inline void move_native_irq(int irq) { } |
| 234 | static inline void move_masked_irq(int irq) { } | 372 | static inline void move_masked_irq(int irq) { } |
| 373 | static inline void irq_move_irq(struct irq_data *data) { } | ||
| 374 | static inline void irq_move_masked_irq(struct irq_data *data) { } | ||
| 235 | #endif | 375 | #endif |
| 236 | 376 | ||
| 237 | extern int no_irq_affinity; | 377 | extern int no_irq_affinity; |
| @@ -267,23 +407,23 @@ extern struct irq_chip no_irq_chip; | |||
| 267 | extern struct irq_chip dummy_irq_chip; | 407 | extern struct irq_chip dummy_irq_chip; |
| 268 | 408 | ||
| 269 | extern void | 409 | extern void |
| 270 | set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, | 410 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
| 271 | irq_flow_handler_t handle); | ||
| 272 | extern void | ||
| 273 | set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | ||
| 274 | irq_flow_handler_t handle, const char *name); | 411 | irq_flow_handler_t handle, const char *name); |
| 275 | 412 | ||
| 413 | static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, | ||
| 414 | irq_flow_handler_t handle) | ||
| 415 | { | ||
| 416 | irq_set_chip_and_handler_name(irq, chip, handle, NULL); | ||
| 417 | } | ||
| 418 | |||
| 276 | extern void | 419 | extern void |
| 277 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 420 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
| 278 | const char *name); | 421 | const char *name); |
| 279 | 422 | ||
| 280 | /* | ||
| 281 | * Set a highlevel flow handler for a given IRQ: | ||
| 282 | */ | ||
| 283 | static inline void | 423 | static inline void |
| 284 | set_irq_handler(unsigned int irq, irq_flow_handler_t handle) | 424 | irq_set_handler(unsigned int irq, irq_flow_handler_t handle) |
| 285 | { | 425 | { |
| 286 | __set_irq_handler(irq, handle, 0, NULL); | 426 | __irq_set_handler(irq, handle, 0, NULL); |
| 287 | } | 427 | } |
| 288 | 428 | ||
| 289 | /* | 429 | /* |
| @@ -292,14 +432,11 @@ set_irq_handler(unsigned int irq, irq_flow_handler_t handle) | |||
| 292 | * IRQ_NOREQUEST and IRQ_NOPROBE) | 432 | * IRQ_NOREQUEST and IRQ_NOPROBE) |
| 293 | */ | 433 | */ |
| 294 | static inline void | 434 | static inline void |
| 295 | set_irq_chained_handler(unsigned int irq, | 435 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) |
| 296 | irq_flow_handler_t handle) | ||
| 297 | { | 436 | { |
| 298 | __set_irq_handler(irq, handle, 1, NULL); | 437 | __irq_set_handler(irq, handle, 1, NULL); |
| 299 | } | 438 | } |
| 300 | 439 | ||
| 301 | extern void set_irq_nested_thread(unsigned int irq, int nest); | ||
| 302 | |||
| 303 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); | 440 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
| 304 | 441 | ||
| 305 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) | 442 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) |
| @@ -312,16 +449,24 @@ static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) | |||
| 312 | irq_modify_status(irq, clr, 0); | 449 | irq_modify_status(irq, clr, 0); |
| 313 | } | 450 | } |
| 314 | 451 | ||
| 315 | static inline void set_irq_noprobe(unsigned int irq) | 452 | static inline void irq_set_noprobe(unsigned int irq) |
| 316 | { | 453 | { |
| 317 | irq_modify_status(irq, 0, IRQ_NOPROBE); | 454 | irq_modify_status(irq, 0, IRQ_NOPROBE); |
| 318 | } | 455 | } |
| 319 | 456 | ||
| 320 | static inline void set_irq_probe(unsigned int irq) | 457 | static inline void irq_set_probe(unsigned int irq) |
| 321 | { | 458 | { |
| 322 | irq_modify_status(irq, IRQ_NOPROBE, 0); | 459 | irq_modify_status(irq, IRQ_NOPROBE, 0); |
| 323 | } | 460 | } |
| 324 | 461 | ||
| 462 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) | ||
| 463 | { | ||
| 464 | if (nest) | ||
| 465 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); | ||
| 466 | else | ||
| 467 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); | ||
| 468 | } | ||
| 469 | |||
| 325 | /* Handle dynamic irq creation and destruction */ | 470 | /* Handle dynamic irq creation and destruction */ |
| 326 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | 471 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
| 327 | extern int create_irq(void); | 472 | extern int create_irq(void); |
| @@ -338,14 +483,14 @@ static inline void dynamic_irq_init(unsigned int irq) | |||
| 338 | } | 483 | } |
| 339 | 484 | ||
| 340 | /* Set/get chip/data for an IRQ: */ | 485 | /* Set/get chip/data for an IRQ: */ |
| 341 | extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); | 486 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); |
| 342 | extern int set_irq_data(unsigned int irq, void *data); | 487 | extern int irq_set_handler_data(unsigned int irq, void *data); |
| 343 | extern int set_irq_chip_data(unsigned int irq, void *data); | 488 | extern int irq_set_chip_data(unsigned int irq, void *data); |
| 344 | extern int set_irq_type(unsigned int irq, unsigned int type); | 489 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); |
| 345 | extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | 490 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); |
| 346 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | 491 | extern struct irq_data *irq_get_irq_data(unsigned int irq); |
| 347 | 492 | ||
| 348 | static inline struct irq_chip *get_irq_chip(unsigned int irq) | 493 | static inline struct irq_chip *irq_get_chip(unsigned int irq) |
| 349 | { | 494 | { |
| 350 | struct irq_data *d = irq_get_irq_data(irq); | 495 | struct irq_data *d = irq_get_irq_data(irq); |
| 351 | return d ? d->chip : NULL; | 496 | return d ? d->chip : NULL; |
| @@ -356,7 +501,7 @@ static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) | |||
| 356 | return d->chip; | 501 | return d->chip; |
| 357 | } | 502 | } |
| 358 | 503 | ||
| 359 | static inline void *get_irq_chip_data(unsigned int irq) | 504 | static inline void *irq_get_chip_data(unsigned int irq) |
| 360 | { | 505 | { |
| 361 | struct irq_data *d = irq_get_irq_data(irq); | 506 | struct irq_data *d = irq_get_irq_data(irq); |
| 362 | return d ? d->chip_data : NULL; | 507 | return d ? d->chip_data : NULL; |
| @@ -367,18 +512,18 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d) | |||
| 367 | return d->chip_data; | 512 | return d->chip_data; |
| 368 | } | 513 | } |
| 369 | 514 | ||
| 370 | static inline void *get_irq_data(unsigned int irq) | 515 | static inline void *irq_get_handler_data(unsigned int irq) |
| 371 | { | 516 | { |
| 372 | struct irq_data *d = irq_get_irq_data(irq); | 517 | struct irq_data *d = irq_get_irq_data(irq); |
| 373 | return d ? d->handler_data : NULL; | 518 | return d ? d->handler_data : NULL; |
| 374 | } | 519 | } |
| 375 | 520 | ||
| 376 | static inline void *irq_data_get_irq_data(struct irq_data *d) | 521 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) |
| 377 | { | 522 | { |
| 378 | return d->handler_data; | 523 | return d->handler_data; |
| 379 | } | 524 | } |
| 380 | 525 | ||
| 381 | static inline struct msi_desc *get_irq_msi(unsigned int irq) | 526 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) |
| 382 | { | 527 | { |
| 383 | struct irq_data *d = irq_get_irq_data(irq); | 528 | struct irq_data *d = irq_get_irq_data(irq); |
| 384 | return d ? d->msi_desc : NULL; | 529 | return d ? d->msi_desc : NULL; |
| @@ -389,6 +534,89 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) | |||
| 389 | return d->msi_desc; | 534 | return d->msi_desc; |
| 390 | } | 535 | } |
| 391 | 536 | ||
| 537 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 538 | /* Please do not use: Use the replacement functions instead */ | ||
| 539 | static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip) | ||
| 540 | { | ||
| 541 | return irq_set_chip(irq, chip); | ||
| 542 | } | ||
| 543 | static inline int set_irq_data(unsigned int irq, void *data) | ||
| 544 | { | ||
| 545 | return irq_set_handler_data(irq, data); | ||
| 546 | } | ||
| 547 | static inline int set_irq_chip_data(unsigned int irq, void *data) | ||
| 548 | { | ||
| 549 | return irq_set_chip_data(irq, data); | ||
| 550 | } | ||
| 551 | static inline int set_irq_type(unsigned int irq, unsigned int type) | ||
| 552 | { | ||
| 553 | return irq_set_irq_type(irq, type); | ||
| 554 | } | ||
| 555 | static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry) | ||
| 556 | { | ||
| 557 | return irq_set_msi_desc(irq, entry); | ||
| 558 | } | ||
| 559 | static inline struct irq_chip *get_irq_chip(unsigned int irq) | ||
| 560 | { | ||
| 561 | return irq_get_chip(irq); | ||
| 562 | } | ||
| 563 | static inline void *get_irq_chip_data(unsigned int irq) | ||
| 564 | { | ||
| 565 | return irq_get_chip_data(irq); | ||
| 566 | } | ||
| 567 | static inline void *get_irq_data(unsigned int irq) | ||
| 568 | { | ||
| 569 | return irq_get_handler_data(irq); | ||
| 570 | } | ||
| 571 | static inline void *irq_data_get_irq_data(struct irq_data *d) | ||
| 572 | { | ||
| 573 | return irq_data_get_irq_handler_data(d); | ||
| 574 | } | ||
| 575 | static inline struct msi_desc *get_irq_msi(unsigned int irq) | ||
| 576 | { | ||
| 577 | return irq_get_msi_desc(irq); | ||
| 578 | } | ||
| 579 | static inline void set_irq_noprobe(unsigned int irq) | ||
| 580 | { | ||
| 581 | irq_set_noprobe(irq); | ||
| 582 | } | ||
| 583 | static inline void set_irq_probe(unsigned int irq) | ||
| 584 | { | ||
| 585 | irq_set_probe(irq); | ||
| 586 | } | ||
| 587 | static inline void set_irq_nested_thread(unsigned int irq, int nest) | ||
| 588 | { | ||
| 589 | irq_set_nested_thread(irq, nest); | ||
| 590 | } | ||
| 591 | static inline void | ||
| 592 | set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | ||
| 593 | irq_flow_handler_t handle, const char *name) | ||
| 594 | { | ||
| 595 | irq_set_chip_and_handler_name(irq, chip, handle, name); | ||
| 596 | } | ||
| 597 | static inline void | ||
| 598 | set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, | ||
| 599 | irq_flow_handler_t handle) | ||
| 600 | { | ||
| 601 | irq_set_chip_and_handler(irq, chip, handle); | ||
| 602 | } | ||
| 603 | static inline void | ||
| 604 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | ||
| 605 | const char *name) | ||
| 606 | { | ||
| 607 | __irq_set_handler(irq, handle, is_chained, name); | ||
| 608 | } | ||
| 609 | static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle) | ||
| 610 | { | ||
| 611 | irq_set_handler(irq, handle); | ||
| 612 | } | ||
| 613 | static inline void | ||
| 614 | set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle) | ||
| 615 | { | ||
| 616 | irq_set_chained_handler(irq, handle); | ||
| 617 | } | ||
| 618 | #endif | ||
| 619 | |||
| 392 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); | 620 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); |
| 393 | void irq_free_descs(unsigned int irq, unsigned int cnt); | 621 | void irq_free_descs(unsigned int irq, unsigned int cnt); |
| 394 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | 622 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); |
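For not-yet-converted callers the migration is mechanical; the wrappers above keep the old spellings alive until an architecture selects GENERIC_HARDIRQS_NO_COMPAT. For example (my_chip and priv stand in for real driver objects):

    /* before this series */
    set_irq_chip_and_handler(irq, &my_chip, handle_edge_irq);
    set_irq_chip_data(irq, priv);
    set_irq_type(irq, IRQ_TYPE_EDGE_RISING);

    /* after: same calls, irq_-prefixed */
    irq_set_chip_and_handler(irq, &my_chip, handle_edge_irq);
    irq_set_chip_data(irq, priv);
    irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);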
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index c1a95b7b58de..00218371518b 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | * For now it's included from <linux/irq.h> | 8 | * For now it's included from <linux/irq.h> |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | struct irq_affinity_notify; | ||
| 11 | struct proc_dir_entry; | 12 | struct proc_dir_entry; |
| 12 | struct timer_rand_state; | 13 | struct timer_rand_state; |
| 13 | /** | 14 | /** |
| @@ -18,13 +19,16 @@ struct timer_rand_state; | |||
| 18 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | 19 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] |
| 19 | * @action: the irq action chain | 20 | * @action: the irq action chain |
| 20 | * @status: status information | 21 | * @status: status information |
| 22 | * @core_internal_state__do_not_mess_with_it: core internal status information | ||
| 21 | * @depth: disable-depth, for nested irq_disable() calls | 23 | * @depth: disable-depth, for nested irq_disable() calls |
| 22 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | 24 | * @wake_depth: enable depth, for multiple set_irq_wake() callers |
| 23 | * @irq_count: stats field to detect stalled irqs | 25 | * @irq_count: stats field to detect stalled irqs |
| 24 | * @last_unhandled: aging timer for unhandled count | 26 | * @last_unhandled: aging timer for unhandled count |
| 25 | * @irqs_unhandled: stats field for spurious unhandled interrupts | 27 | * @irqs_unhandled: stats field for spurious unhandled interrupts |
| 26 | * @lock: locking for SMP | 28 | * @lock: locking for SMP |
| 29 | * @affinity_notify: context for notification of affinity changes | ||
| 27 | * @pending_mask: pending rebalanced interrupts | 30 | * @pending_mask: pending rebalanced interrupts |
| 31 | * @threads_oneshot: bitfield to handle shared oneshot threads | ||
| 28 | * @threads_active: number of irqaction threads currently running | 32 | * @threads_active: number of irqaction threads currently running |
| 29 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | 33 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers |
| 30 | * @dir: /proc/irq/ procfs entry | 34 | * @dir: /proc/irq/ procfs entry |
| @@ -45,6 +49,7 @@ struct irq_desc { | |||
| 45 | struct { | 49 | struct { |
| 46 | unsigned int irq; | 50 | unsigned int irq; |
| 47 | unsigned int node; | 51 | unsigned int node; |
| 52 | unsigned int pad_do_not_even_think_about_it; | ||
| 48 | struct irq_chip *chip; | 53 | struct irq_chip *chip; |
| 49 | void *handler_data; | 54 | void *handler_data; |
| 50 | void *chip_data; | 55 | void *chip_data; |
| @@ -59,9 +64,16 @@ struct irq_desc { | |||
| 59 | struct timer_rand_state *timer_rand_state; | 64 | struct timer_rand_state *timer_rand_state; |
| 60 | unsigned int __percpu *kstat_irqs; | 65 | unsigned int __percpu *kstat_irqs; |
| 61 | irq_flow_handler_t handle_irq; | 66 | irq_flow_handler_t handle_irq; |
| 67 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | ||
| 68 | irq_preflow_handler_t preflow_handler; | ||
| 69 | #endif | ||
| 62 | struct irqaction *action; /* IRQ action list */ | 70 | struct irqaction *action; /* IRQ action list */ |
| 71 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 72 | unsigned int status_use_accessors; | ||
| 73 | #else | ||
| 63 | unsigned int status; /* IRQ status */ | 74 | unsigned int status; /* IRQ status */ |
| 64 | 75 | #endif | |
| 76 | unsigned int core_internal_state__do_not_mess_with_it; | ||
| 65 | unsigned int depth; /* nested irq disables */ | 77 | unsigned int depth; /* nested irq disables */ |
| 66 | unsigned int wake_depth; /* nested wake enables */ | 78 | unsigned int wake_depth; /* nested wake enables */ |
| 67 | unsigned int irq_count; /* For detecting broken IRQs */ | 79 | unsigned int irq_count; /* For detecting broken IRQs */ |
| @@ -70,10 +82,12 @@ struct irq_desc { | |||
| 70 | raw_spinlock_t lock; | 82 | raw_spinlock_t lock; |
| 71 | #ifdef CONFIG_SMP | 83 | #ifdef CONFIG_SMP |
| 72 | const struct cpumask *affinity_hint; | 84 | const struct cpumask *affinity_hint; |
| 85 | struct irq_affinity_notify *affinity_notify; | ||
| 73 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 86 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 74 | cpumask_var_t pending_mask; | 87 | cpumask_var_t pending_mask; |
| 75 | #endif | 88 | #endif |
| 76 | #endif | 89 | #endif |
| 90 | unsigned long threads_oneshot; | ||
| 77 | atomic_t threads_active; | 91 | atomic_t threads_active; |
| 78 | wait_queue_head_t wait_for_threads; | 92 | wait_queue_head_t wait_for_threads; |
| 79 | #ifdef CONFIG_PROC_FS | 93 | #ifdef CONFIG_PROC_FS |
| @@ -95,10 +109,51 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | |||
| 95 | 109 | ||
| 96 | #ifdef CONFIG_GENERIC_HARDIRQS | 110 | #ifdef CONFIG_GENERIC_HARDIRQS |
| 97 | 111 | ||
| 98 | #define get_irq_desc_chip(desc) ((desc)->irq_data.chip) | 112 | static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) |
| 99 | #define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) | 113 | { |
| 100 | #define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) | 114 | return &desc->irq_data; |
| 101 | #define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) | 115 | } |
| 116 | |||
| 117 | static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) | ||
| 118 | { | ||
| 119 | return desc->irq_data.chip; | ||
| 120 | } | ||
| 121 | |||
| 122 | static inline void *irq_desc_get_chip_data(struct irq_desc *desc) | ||
| 123 | { | ||
| 124 | return desc->irq_data.chip_data; | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) | ||
| 128 | { | ||
| 129 | return desc->irq_data.handler_data; | ||
| 130 | } | ||
| 131 | |||
| 132 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | ||
| 133 | { | ||
| 134 | return desc->irq_data.msi_desc; | ||
| 135 | } | ||
| 136 | |||
| 137 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 138 | static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc) | ||
| 139 | { | ||
| 140 | return irq_desc_get_chip(desc); | ||
| 141 | } | ||
| 142 | static inline void *get_irq_desc_data(struct irq_desc *desc) | ||
| 143 | { | ||
| 144 | return irq_desc_get_handler_data(desc); | ||
| 145 | } | ||
| 146 | |||
| 147 | static inline void *get_irq_desc_chip_data(struct irq_desc *desc) | ||
| 148 | { | ||
| 149 | return irq_desc_get_chip_data(desc); | ||
| 150 | } | ||
| 151 | |||
| 152 | static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc) | ||
| 153 | { | ||
| 154 | return irq_desc_get_msi_desc(desc); | ||
| 155 | } | ||
| 156 | #endif | ||
| 102 | 157 | ||
| 103 | /* | 158 | /* |
| 104 | * Architectures call this to let the generic IRQ layer | 159 | * Architectures call this to let the generic IRQ layer |
| @@ -123,6 +178,7 @@ static inline int irq_has_action(unsigned int irq) | |||
| 123 | return desc->action != NULL; | 178 | return desc->action != NULL; |
| 124 | } | 179 | } |
| 125 | 180 | ||
| 181 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 126 | static inline int irq_balancing_disabled(unsigned int irq) | 182 | static inline int irq_balancing_disabled(unsigned int irq) |
| 127 | { | 183 | { |
| 128 | struct irq_desc *desc; | 184 | struct irq_desc *desc; |
| @@ -130,6 +186,7 @@ static inline int irq_balancing_disabled(unsigned int irq) | |||
| 130 | desc = irq_to_desc(irq); | 186 | desc = irq_to_desc(irq); |
| 131 | return desc->status & IRQ_NO_BALANCING_MASK; | 187 | return desc->status & IRQ_NO_BALANCING_MASK; |
| 132 | } | 188 | } |
| 189 | #endif | ||
| 133 | 190 | ||
| 134 | /* caller has locked the irq_desc and both params are valid */ | 191 | /* caller has locked the irq_desc and both params are valid */ |
| 135 | static inline void __set_irq_handler_unlocked(int irq, | 192 | static inline void __set_irq_handler_unlocked(int irq, |
| @@ -140,6 +197,17 @@ static inline void __set_irq_handler_unlocked(int irq, | |||
| 140 | desc = irq_to_desc(irq); | 197 | desc = irq_to_desc(irq); |
| 141 | desc->handle_irq = handler; | 198 | desc->handle_irq = handler; |
| 142 | } | 199 | } |
| 200 | |||
| 201 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | ||
| 202 | static inline void | ||
| 203 | __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) | ||
| 204 | { | ||
| 205 | struct irq_desc *desc; | ||
| 206 | |||
| 207 | desc = irq_to_desc(irq); | ||
| 208 | desc->preflow_handler = handler; | ||
| 209 | } | ||
| 210 | #endif | ||
| 143 | #endif | 211 | #endif |
| 144 | 212 | ||
| 145 | #endif | 213 | #endif |
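Flow handlers and multiplexing code now go through the typed irq_desc_get_*() accessors instead of the removed get_irq_desc_* macros. A hypothetical demux flow handler:

    #include <linux/irq.h>

    static void my_flow_handler(unsigned int irq, struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);
            void *priv = irq_desc_get_handler_data(desc);

            if (chip->irq_ack)
                    chip->irq_ack(irq_desc_get_irq_data(desc));
            /* ... demultiplex using 'priv' and run the action chain ... */
    }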
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 8e42fec7686d..09bef82d74cb 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | # Select this to activate the generic irq options below | ||
| 1 | config HAVE_GENERIC_HARDIRQS | 2 | config HAVE_GENERIC_HARDIRQS |
| 2 | def_bool n | 3 | bool |
| 3 | 4 | ||
| 4 | if HAVE_GENERIC_HARDIRQS | 5 | if HAVE_GENERIC_HARDIRQS |
| 5 | menu "IRQ subsystem" | 6 | menu "IRQ subsystem" |
| @@ -11,26 +12,44 @@ config GENERIC_HARDIRQS | |||
| 11 | 12 | ||
| 12 | # Select this to disable the deprecated stuff | 13 | # Select this to disable the deprecated stuff |
| 13 | config GENERIC_HARDIRQS_NO_DEPRECATED | 14 | config GENERIC_HARDIRQS_NO_DEPRECATED |
| 14 | def_bool n | 15 | bool |
| 16 | |||
| 17 | config GENERIC_HARDIRQS_NO_COMPAT | ||
| 18 | bool | ||
| 15 | 19 | ||
| 16 | # Options selectable by the architecture code | 20 | # Options selectable by the architecture code |
| 21 | |||
| 22 | # Make sparse irq Kconfig switch below available | ||
| 17 | config HAVE_SPARSE_IRQ | 23 | config HAVE_SPARSE_IRQ |
| 18 | def_bool n | 24 | bool |
| 19 | 25 | ||
| 26 | # Enable the generic irq autoprobe mechanism | ||
| 20 | config GENERIC_IRQ_PROBE | 27 | config GENERIC_IRQ_PROBE |
| 21 | def_bool n | 28 | bool |
| 29 | |||
| 30 | # Use the generic /proc/interrupts implementation | ||
| 31 | config GENERIC_IRQ_SHOW | ||
| 32 | bool | ||
| 22 | 33 | ||
| 34 | # Support for delayed migration from interrupt context | ||
| 23 | config GENERIC_PENDING_IRQ | 35 | config GENERIC_PENDING_IRQ |
| 24 | def_bool n | 36 | bool |
| 25 | 37 | ||
| 38 | # Alpha specific irq affinity mechanism | ||
| 26 | config AUTO_IRQ_AFFINITY | 39 | config AUTO_IRQ_AFFINITY |
| 27 | def_bool n | 40 | bool |
| 28 | |||
| 29 | config IRQ_PER_CPU | ||
| 30 | def_bool n | ||
| 31 | 41 | ||
| 42 | # Tasklet based software resend for pending interrupts on enable_irq() | ||
| 32 | config HARDIRQS_SW_RESEND | 43 | config HARDIRQS_SW_RESEND |
| 33 | def_bool n | 44 | bool |
| 45 | |||
| 46 | # Preflow handler support for fasteoi (sparc64) | ||
| 47 | config IRQ_PREFLOW_FASTEOI | ||
| 48 | bool | ||
| 49 | |||
| 50 | # Support forced irq threading | ||
| 51 | config IRQ_FORCED_THREADING | ||
| 52 | bool | ||
| 34 | 53 | ||
| 35 | config SPARSE_IRQ | 54 | config SPARSE_IRQ |
| 36 | bool "Support sparse irq numbering" | 55 | bool "Support sparse irq numbering" |
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 505798f86c36..394784c57060 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | /* | 17 | /* |
| 18 | * Autodetection depends on the fact that any interrupt that | 18 | * Autodetection depends on the fact that any interrupt that |
| 19 | * comes in on to an unassigned handler will get stuck with | 19 | * comes in on to an unassigned handler will get stuck with |
| 20 | * "IRQ_WAITING" cleared and the interrupt disabled. | 20 | * "IRQS_WAITING" cleared and the interrupt disabled. |
| 21 | */ | 21 | */ |
| 22 | static DEFINE_MUTEX(probing_active); | 22 | static DEFINE_MUTEX(probing_active); |
| 23 | 23 | ||
| @@ -32,7 +32,6 @@ unsigned long probe_irq_on(void) | |||
| 32 | { | 32 | { |
| 33 | struct irq_desc *desc; | 33 | struct irq_desc *desc; |
| 34 | unsigned long mask = 0; | 34 | unsigned long mask = 0; |
| 35 | unsigned int status; | ||
| 36 | int i; | 35 | int i; |
| 37 | 36 | ||
| 38 | /* | 37 | /* |
| @@ -46,13 +45,7 @@ unsigned long probe_irq_on(void) | |||
| 46 | */ | 45 | */ |
| 47 | for_each_irq_desc_reverse(i, desc) { | 46 | for_each_irq_desc_reverse(i, desc) { |
| 48 | raw_spin_lock_irq(&desc->lock); | 47 | raw_spin_lock_irq(&desc->lock); |
| 49 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { | 48 | if (!desc->action && irq_settings_can_probe(desc)) { |
| 50 | /* | ||
| 51 | * An old-style architecture might still have | ||
| 52 | * the handle_bad_irq handler there: | ||
| 53 | */ | ||
| 54 | compat_irq_chip_set_default_handler(desc); | ||
| 55 | |||
| 56 | /* | 49 | /* |
| 57 | * Some chips need to know about probing in | 50 | * Some chips need to know about probing in |
| 58 | * progress: | 51 | * progress: |
| @@ -60,7 +53,7 @@ unsigned long probe_irq_on(void) | |||
| 60 | if (desc->irq_data.chip->irq_set_type) | 53 | if (desc->irq_data.chip->irq_set_type) |
| 61 | desc->irq_data.chip->irq_set_type(&desc->irq_data, | 54 | desc->irq_data.chip->irq_set_type(&desc->irq_data, |
| 62 | IRQ_TYPE_PROBE); | 55 | IRQ_TYPE_PROBE); |
| 63 | desc->irq_data.chip->irq_startup(&desc->irq_data); | 56 | irq_startup(desc); |
| 64 | } | 57 | } |
| 65 | raw_spin_unlock_irq(&desc->lock); | 58 | raw_spin_unlock_irq(&desc->lock); |
| 66 | } | 59 | } |
| @@ -75,10 +68,12 @@ unsigned long probe_irq_on(void) | |||
| 75 | */ | 68 | */ |
| 76 | for_each_irq_desc_reverse(i, desc) { | 69 | for_each_irq_desc_reverse(i, desc) { |
| 77 | raw_spin_lock_irq(&desc->lock); | 70 | raw_spin_lock_irq(&desc->lock); |
| 78 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { | 71 | if (!desc->action && irq_settings_can_probe(desc)) { |
| 79 | desc->status |= IRQ_AUTODETECT | IRQ_WAITING; | 72 | desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; |
| 80 | if (desc->irq_data.chip->irq_startup(&desc->irq_data)) | 73 | if (irq_startup(desc)) { |
| 81 | desc->status |= IRQ_PENDING; | 74 | irq_compat_set_pending(desc); |
| 75 | desc->istate |= IRQS_PENDING; | ||
| 76 | } | ||
| 82 | } | 77 | } |
| 83 | raw_spin_unlock_irq(&desc->lock); | 78 | raw_spin_unlock_irq(&desc->lock); |
| 84 | } | 79 | } |
| @@ -93,13 +88,12 @@ unsigned long probe_irq_on(void) | |||
| 93 | */ | 88 | */ |
| 94 | for_each_irq_desc(i, desc) { | 89 | for_each_irq_desc(i, desc) { |
| 95 | raw_spin_lock_irq(&desc->lock); | 90 | raw_spin_lock_irq(&desc->lock); |
| 96 | status = desc->status; | ||
| 97 | 91 | ||
| 98 | if (status & IRQ_AUTODETECT) { | 92 | if (desc->istate & IRQS_AUTODETECT) { |
| 99 | /* It triggered already - consider it spurious. */ | 93 | /* It triggered already - consider it spurious. */ |
| 100 | if (!(status & IRQ_WAITING)) { | 94 | if (!(desc->istate & IRQS_WAITING)) { |
| 101 | desc->status = status & ~IRQ_AUTODETECT; | 95 | desc->istate &= ~IRQS_AUTODETECT; |
| 102 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | 96 | irq_shutdown(desc); |
| 103 | } else | 97 | } else |
| 104 | if (i < 32) | 98 | if (i < 32) |
| 105 | mask |= 1 << i; | 99 | mask |= 1 << i; |
| @@ -125,20 +119,18 @@ EXPORT_SYMBOL(probe_irq_on); | |||
| 125 | */ | 119 | */ |
| 126 | unsigned int probe_irq_mask(unsigned long val) | 120 | unsigned int probe_irq_mask(unsigned long val) |
| 127 | { | 121 | { |
| 128 | unsigned int status, mask = 0; | 122 | unsigned int mask = 0; |
| 129 | struct irq_desc *desc; | 123 | struct irq_desc *desc; |
| 130 | int i; | 124 | int i; |
| 131 | 125 | ||
| 132 | for_each_irq_desc(i, desc) { | 126 | for_each_irq_desc(i, desc) { |
| 133 | raw_spin_lock_irq(&desc->lock); | 127 | raw_spin_lock_irq(&desc->lock); |
| 134 | status = desc->status; | 128 | if (desc->istate & IRQS_AUTODETECT) { |
| 135 | 129 | if (i < 16 && !(desc->istate & IRQS_WAITING)) | |
| 136 | if (status & IRQ_AUTODETECT) { | ||
| 137 | if (i < 16 && !(status & IRQ_WAITING)) | ||
| 138 | mask |= 1 << i; | 130 | mask |= 1 << i; |
| 139 | 131 | ||
| 140 | desc->status = status & ~IRQ_AUTODETECT; | 132 | desc->istate &= ~IRQS_AUTODETECT; |
| 141 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | 133 | irq_shutdown(desc); |
| 142 | } | 134 | } |
| 143 | raw_spin_unlock_irq(&desc->lock); | 135 | raw_spin_unlock_irq(&desc->lock); |
| 144 | } | 136 | } |
| @@ -169,20 +161,18 @@ int probe_irq_off(unsigned long val) | |||
| 169 | { | 161 | { |
| 170 | int i, irq_found = 0, nr_of_irqs = 0; | 162 | int i, irq_found = 0, nr_of_irqs = 0; |
| 171 | struct irq_desc *desc; | 163 | struct irq_desc *desc; |
| 172 | unsigned int status; | ||
| 173 | 164 | ||
| 174 | for_each_irq_desc(i, desc) { | 165 | for_each_irq_desc(i, desc) { |
| 175 | raw_spin_lock_irq(&desc->lock); | 166 | raw_spin_lock_irq(&desc->lock); |
| 176 | status = desc->status; | ||
| 177 | 167 | ||
| 178 | if (status & IRQ_AUTODETECT) { | 168 | if (desc->istate & IRQS_AUTODETECT) { |
| 179 | if (!(status & IRQ_WAITING)) { | 169 | if (!(desc->istate & IRQS_WAITING)) { |
| 180 | if (!nr_of_irqs) | 170 | if (!nr_of_irqs) |
| 181 | irq_found = i; | 171 | irq_found = i; |
| 182 | nr_of_irqs++; | 172 | nr_of_irqs++; |
| 183 | } | 173 | } |
| 184 | desc->status = status & ~IRQ_AUTODETECT; | 174 | desc->istate &= ~IRQS_AUTODETECT; |
| 185 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | 175 | irq_shutdown(desc); |
| 186 | } | 176 | } |
| 187 | raw_spin_unlock_irq(&desc->lock); | 177 | raw_spin_unlock_irq(&desc->lock); |
| 188 | } | 178 | } |
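The autoprobe conversion above is purely internal: the IRQ_AUTODETECT/IRQ_WAITING bits move from desc->status into desc->istate (as IRQS_AUTODETECT/IRQS_WAITING) and the open-coded chip callbacks are replaced by the new irq_startup()/irq_shutdown() helpers, while the driver-visible probe API keeps its semantics. For reference, a minimal sketch of that driver-side sequence; the mydev_* helpers are hypothetical:

	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unused irq lines */
	mydev_force_interrupt(dev);	/* hypothetical: make the card fire */
	udelay(100);
	irq = probe_irq_off(mask);	/* >0: the irq, 0: none, <0: several */
	if (irq <= 0)
		printk(KERN_WARNING "mydev: irq autoprobe failed (%d)\n", irq);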
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index baa5c4acad83..c9c0601f0615 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -19,140 +19,110 @@ | |||
| 19 | #include "internals.h" | 19 | #include "internals.h" |
| 20 | 20 | ||
| 21 | /** | 21 | /** |
| 22 | * set_irq_chip - set the irq chip for an irq | 22 | * irq_set_chip - set the irq chip for an irq |
| 23 | * @irq: irq number | 23 | * @irq: irq number |
| 24 | * @chip: pointer to irq chip description structure | 24 | * @chip: pointer to irq chip description structure |
| 25 | */ | 25 | */ |
| 26 | int set_irq_chip(unsigned int irq, struct irq_chip *chip) | 26 | int irq_set_chip(unsigned int irq, struct irq_chip *chip) |
| 27 | { | 27 | { |
| 28 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 29 | unsigned long flags; | 28 | unsigned long flags; |
| 29 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
| 30 | 30 | ||
| 31 | if (!desc) { | 31 | if (!desc) |
| 32 | WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq); | ||
| 33 | return -EINVAL; | 32 | return -EINVAL; |
| 34 | } | ||
| 35 | 33 | ||
| 36 | if (!chip) | 34 | if (!chip) |
| 37 | chip = &no_irq_chip; | 35 | chip = &no_irq_chip; |
| 38 | 36 | ||
| 39 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 40 | irq_chip_set_defaults(chip); | 37 | irq_chip_set_defaults(chip); |
| 41 | desc->irq_data.chip = chip; | 38 | desc->irq_data.chip = chip; |
| 42 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 39 | irq_put_desc_unlock(desc, flags); |
| 43 | |||
| 44 | return 0; | 40 | return 0; |
| 45 | } | 41 | } |
| 46 | EXPORT_SYMBOL(set_irq_chip); | 42 | EXPORT_SYMBOL(irq_set_chip); |
| 47 | 43 | ||
| 48 | /** | 44 | /** |
| 49 | * set_irq_type - set the irq trigger type for an irq | 45 | * irq_set_irq_type - set the irq trigger type for an irq |
| 50 | * @irq: irq number | 46 | * @irq: irq number |
| 51 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h | 47 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
| 52 | */ | 48 | */ |
| 53 | int set_irq_type(unsigned int irq, unsigned int type) | 49 | int irq_set_irq_type(unsigned int irq, unsigned int type) |
| 54 | { | 50 | { |
| 55 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 56 | unsigned long flags; | 51 | unsigned long flags; |
| 57 | int ret = -ENXIO; | 52 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); |
| 53 | int ret = 0; | ||
| 58 | 54 | ||
| 59 | if (!desc) { | 55 | if (!desc) |
| 60 | printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); | 56 | return -EINVAL; |
| 61 | return -ENODEV; | ||
| 62 | } | ||
| 63 | 57 | ||
| 64 | type &= IRQ_TYPE_SENSE_MASK; | 58 | type &= IRQ_TYPE_SENSE_MASK; |
| 65 | if (type == IRQ_TYPE_NONE) | 59 | if (type != IRQ_TYPE_NONE) |
| 66 | return 0; | 60 | ret = __irq_set_trigger(desc, irq, type); |
| 67 | 61 | irq_put_desc_busunlock(desc, flags); | |
| 68 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 69 | ret = __irq_set_trigger(desc, irq, type); | ||
| 70 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 71 | return ret; | 62 | return ret; |
| 72 | } | 63 | } |
| 73 | EXPORT_SYMBOL(set_irq_type); | 64 | EXPORT_SYMBOL(irq_set_irq_type); |
| 74 | 65 | ||
| 75 | /** | 66 | /** |
| 76 | * set_irq_data - set irq type data for an irq | 67 | * irq_set_handler_data - set irq handler data for an irq |
| 77 | * @irq: Interrupt number | 68 | * @irq: Interrupt number |
| 78 | * @data: Pointer to interrupt specific data | 69 | * @data: Pointer to interrupt specific data |
| 79 | * | 70 | * |
| 80 | * Set the hardware irq controller data for an irq | 71 | * Set the hardware irq controller data for an irq |
| 81 | */ | 72 | */ |
| 82 | int set_irq_data(unsigned int irq, void *data) | 73 | int irq_set_handler_data(unsigned int irq, void *data) |
| 83 | { | 74 | { |
| 84 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 85 | unsigned long flags; | 75 | unsigned long flags; |
| 76 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
| 86 | 77 | ||
| 87 | if (!desc) { | 78 | if (!desc) |
| 88 | printk(KERN_ERR | ||
| 89 | "Trying to install controller data for IRQ%d\n", irq); | ||
| 90 | return -EINVAL; | 79 | return -EINVAL; |
| 91 | } | ||
| 92 | |||
| 93 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 94 | desc->irq_data.handler_data = data; | 80 | desc->irq_data.handler_data = data; |
| 95 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 81 | irq_put_desc_unlock(desc, flags); |
| 96 | return 0; | 82 | return 0; |
| 97 | } | 83 | } |
| 98 | EXPORT_SYMBOL(set_irq_data); | 84 | EXPORT_SYMBOL(irq_set_handler_data); |
| 99 | 85 | ||
| 100 | /** | 86 | /** |
| 101 | * set_irq_msi - set MSI descriptor data for an irq | 87 | * irq_set_msi_desc - set MSI descriptor data for an irq |
| 102 | * @irq: Interrupt number | 88 | * @irq: Interrupt number |
| 103 | * @entry: Pointer to MSI descriptor data | 89 | * @entry: Pointer to MSI descriptor data |
| 104 | * | 90 | * |
| 105 | * Set the MSI descriptor entry for an irq | 91 | * Set the MSI descriptor entry for an irq |
| 106 | */ | 92 | */ |
| 107 | int set_irq_msi(unsigned int irq, struct msi_desc *entry) | 93 | int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) |
| 108 | { | 94 | { |
| 109 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 110 | unsigned long flags; | 95 | unsigned long flags; |
| 96 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
| 111 | 97 | ||
| 112 | if (!desc) { | 98 | if (!desc) |
| 113 | printk(KERN_ERR | ||
| 114 | "Trying to install msi data for IRQ%d\n", irq); | ||
| 115 | return -EINVAL; | 99 | return -EINVAL; |
| 116 | } | ||
| 117 | |||
| 118 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 119 | desc->irq_data.msi_desc = entry; | 100 | desc->irq_data.msi_desc = entry; |
| 120 | if (entry) | 101 | if (entry) |
| 121 | entry->irq = irq; | 102 | entry->irq = irq; |
| 122 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 103 | irq_put_desc_unlock(desc, flags); |
| 123 | return 0; | 104 | return 0; |
| 124 | } | 105 | } |
| 125 | 106 | ||
| 126 | /** | 107 | /** |
| 127 | * set_irq_chip_data - set irq chip data for an irq | 108 | * irq_set_chip_data - set irq chip data for an irq |
| 128 | * @irq: Interrupt number | 109 | * @irq: Interrupt number |
| 129 | * @data: Pointer to chip specific data | 110 | * @data: Pointer to chip specific data |
| 130 | * | 111 | * |
| 131 | * Set the hardware irq chip data for an irq | 112 | * Set the hardware irq chip data for an irq |
| 132 | */ | 113 | */ |
| 133 | int set_irq_chip_data(unsigned int irq, void *data) | 114 | int irq_set_chip_data(unsigned int irq, void *data) |
| 134 | { | 115 | { |
| 135 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 136 | unsigned long flags; | 116 | unsigned long flags; |
| 117 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
| 137 | 118 | ||
| 138 | if (!desc) { | 119 | if (!desc) |
| 139 | printk(KERN_ERR | ||
| 140 | "Trying to install chip data for IRQ%d\n", irq); | ||
| 141 | return -EINVAL; | ||
| 142 | } | ||
| 143 | |||
| 144 | if (!desc->irq_data.chip) { | ||
| 145 | printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); | ||
| 146 | return -EINVAL; | 120 | return -EINVAL; |
| 147 | } | ||
| 148 | |||
| 149 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 150 | desc->irq_data.chip_data = data; | 121 | desc->irq_data.chip_data = data; |
| 151 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 122 | irq_put_desc_unlock(desc, flags); |
| 152 | |||
| 153 | return 0; | 123 | return 0; |
| 154 | } | 124 | } |
| 155 | EXPORT_SYMBOL(set_irq_chip_data); | 125 | EXPORT_SYMBOL(irq_set_chip_data); |
| 156 | 126 | ||
| 157 | struct irq_data *irq_get_irq_data(unsigned int irq) | 127 | struct irq_data *irq_get_irq_data(unsigned int irq) |
| 158 | { | 128 | { |
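All of the setters above now follow one pattern: irq_get_desc_lock() folds the irq_to_desc() lookup, the NULL check and raw_spin_lock_irqsave() into a single call, and irq_put_desc_unlock() undoes it. Callers migrate from the old set_irq_*() names to the irq_set_*() namespace; a sketch of typical platform setup code after the rename, with the my_* names purely illustrative:

	irq_set_chip(irq, &my_pic_chip);
	irq_set_chip_data(irq, my_pic_regs);
	irq_set_handler_data(irq, my_cookie);
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);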
| @@ -162,72 +132,75 @@ struct irq_data *irq_get_irq_data(unsigned int irq) | |||
| 162 | } | 132 | } |
| 163 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | 133 | EXPORT_SYMBOL_GPL(irq_get_irq_data); |
| 164 | 134 | ||
| 165 | /** | 135 | static void irq_state_clr_disabled(struct irq_desc *desc) |
| 166 | * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq | ||
| 167 | * | ||
| 168 | * @irq: Interrupt number | ||
| 169 | * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag | ||
| 170 | * | ||
| 171 | * The IRQ_NESTED_THREAD flag indicates that on | ||
| 172 | * request_threaded_irq() no separate interrupt thread should be | ||
| 173 | * created for the irq as the handler are called nested in the | ||
| 174 | * context of a demultiplexing interrupt handler thread. | ||
| 175 | */ | ||
| 176 | void set_irq_nested_thread(unsigned int irq, int nest) | ||
| 177 | { | 136 | { |
| 178 | struct irq_desc *desc = irq_to_desc(irq); | 137 | desc->istate &= ~IRQS_DISABLED; |
| 179 | unsigned long flags; | 138 | irq_compat_clr_disabled(desc); |
| 180 | |||
| 181 | if (!desc) | ||
| 182 | return; | ||
| 183 | |||
| 184 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 185 | if (nest) | ||
| 186 | desc->status |= IRQ_NESTED_THREAD; | ||
| 187 | else | ||
| 188 | desc->status &= ~IRQ_NESTED_THREAD; | ||
| 189 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 190 | } | 139 | } |
| 191 | EXPORT_SYMBOL_GPL(set_irq_nested_thread); | ||
| 192 | 140 | ||
| 193 | /* | 141 | static void irq_state_set_disabled(struct irq_desc *desc) |
| 194 | * default enable function | ||
| 195 | */ | ||
| 196 | static void default_enable(struct irq_data *data) | ||
| 197 | { | 142 | { |
| 198 | struct irq_desc *desc = irq_data_to_desc(data); | 143 | desc->istate |= IRQS_DISABLED; |
| 144 | irq_compat_set_disabled(desc); | ||
| 145 | } | ||
| 199 | 146 | ||
| 200 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 147 | static void irq_state_clr_masked(struct irq_desc *desc) |
| 201 | desc->status &= ~IRQ_MASKED; | 148 | { |
| 149 | desc->istate &= ~IRQS_MASKED; | ||
| 150 | irq_compat_clr_masked(desc); | ||
| 202 | } | 151 | } |
| 203 | 152 | ||
| 204 | /* | 153 | static void irq_state_set_masked(struct irq_desc *desc) |
| 205 | * default disable function | ||
| 206 | */ | ||
| 207 | static void default_disable(struct irq_data *data) | ||
| 208 | { | 154 | { |
| 155 | desc->istate |= IRQS_MASKED; | ||
| 156 | irq_compat_set_masked(desc); | ||
| 209 | } | 157 | } |
| 210 | 158 | ||
| 211 | /* | 159 | int irq_startup(struct irq_desc *desc) |
| 212 | * default startup function | ||
| 213 | */ | ||
| 214 | static unsigned int default_startup(struct irq_data *data) | ||
| 215 | { | 160 | { |
| 216 | struct irq_desc *desc = irq_data_to_desc(data); | 161 | irq_state_clr_disabled(desc); |
| 162 | desc->depth = 0; | ||
| 163 | |||
| 164 | if (desc->irq_data.chip->irq_startup) { | ||
| 165 | int ret = desc->irq_data.chip->irq_startup(&desc->irq_data); | ||
| 166 | irq_state_clr_masked(desc); | ||
| 167 | return ret; | ||
| 168 | } | ||
| 217 | 169 | ||
| 218 | desc->irq_data.chip->irq_enable(data); | 170 | irq_enable(desc); |
| 219 | return 0; | 171 | return 0; |
| 220 | } | 172 | } |
| 221 | 173 | ||
| 222 | /* | 174 | void irq_shutdown(struct irq_desc *desc) |
| 223 | * default shutdown function | ||
| 224 | */ | ||
| 225 | static void default_shutdown(struct irq_data *data) | ||
| 226 | { | 175 | { |
| 227 | struct irq_desc *desc = irq_data_to_desc(data); | 176 | irq_state_set_disabled(desc); |
| 177 | desc->depth = 1; | ||
| 178 | if (desc->irq_data.chip->irq_shutdown) | ||
| 179 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | ||
| 180 | if (desc->irq_data.chip->irq_disable) | ||
| 181 | desc->irq_data.chip->irq_disable(&desc->irq_data); | ||
| 182 | else | ||
| 183 | desc->irq_data.chip->irq_mask(&desc->irq_data); | ||
| 184 | irq_state_set_masked(desc); | ||
| 185 | } | ||
| 228 | 186 | ||
| 229 | desc->irq_data.chip->irq_mask(&desc->irq_data); | 187 | void irq_enable(struct irq_desc *desc) |
| 230 | desc->status |= IRQ_MASKED; | 188 | { |
| 189 | irq_state_clr_disabled(desc); | ||
| 190 | if (desc->irq_data.chip->irq_enable) | ||
| 191 | desc->irq_data.chip->irq_enable(&desc->irq_data); | ||
| 192 | else | ||
| 193 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | ||
| 194 | irq_state_clr_masked(desc); | ||
| 195 | } | ||
| 196 | |||
| 197 | void irq_disable(struct irq_desc *desc) | ||
| 198 | { | ||
| 199 | irq_state_set_disabled(desc); | ||
| 200 | if (desc->irq_data.chip->irq_disable) { | ||
| 201 | desc->irq_data.chip->irq_disable(&desc->irq_data); | ||
| 202 | irq_state_set_masked(desc); | ||
| 203 | } | ||
| 231 | } | 204 | } |
| 232 | 205 | ||
| 233 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | 206 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED |
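With the default_enable/disable/startup/shutdown trampolines gone, the fallback order lives in the core helpers themselves: irq_startup() prefers chip->irq_startup and otherwise calls irq_enable(), which in turn prefers chip->irq_enable over chip->irq_unmask; irq_shutdown() ends with the line masked via chip->irq_disable or chip->irq_mask, and irq_disable() deliberately marks state only, masking just when the chip supplies irq_disable (lazy disable otherwise). A chip can therefore stay minimal and lean on the fallbacks; a sketch with hypothetical callbacks:

	static struct irq_chip my_gpio_chip = {
		.name		= "my-gpio",
		.irq_ack	= my_gpio_ack,
		.irq_mask	= my_gpio_mask,		/* shutdown/disable fallback */
		.irq_unmask	= my_gpio_unmask,	/* startup/enable fallback */
	};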
| @@ -315,10 +288,6 @@ static void compat_bus_sync_unlock(struct irq_data *data) | |||
| 315 | void irq_chip_set_defaults(struct irq_chip *chip) | 288 | void irq_chip_set_defaults(struct irq_chip *chip) |
| 316 | { | 289 | { |
| 317 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | 290 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED |
| 318 | /* | ||
| 319 | * Compat fixup functions need to be before we set the | ||
| 320 | * defaults for enable/disable/startup/shutdown | ||
| 321 | */ | ||
| 322 | if (chip->enable) | 291 | if (chip->enable) |
| 323 | chip->irq_enable = compat_irq_enable; | 292 | chip->irq_enable = compat_irq_enable; |
| 324 | if (chip->disable) | 293 | if (chip->disable) |
| @@ -327,33 +296,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) | |||
| 327 | chip->irq_shutdown = compat_irq_shutdown; | 296 | chip->irq_shutdown = compat_irq_shutdown; |
| 328 | if (chip->startup) | 297 | if (chip->startup) |
| 329 | chip->irq_startup = compat_irq_startup; | 298 | chip->irq_startup = compat_irq_startup; |
| 330 | #endif | ||
| 331 | /* | ||
| 332 | * The real defaults | ||
| 333 | */ | ||
| 334 | if (!chip->irq_enable) | ||
| 335 | chip->irq_enable = default_enable; | ||
| 336 | if (!chip->irq_disable) | ||
| 337 | chip->irq_disable = default_disable; | ||
| 338 | if (!chip->irq_startup) | ||
| 339 | chip->irq_startup = default_startup; | ||
| 340 | /* | ||
| 341 | * We use chip->irq_disable, when the user provided its own. When | ||
| 342 | * we have default_disable set for chip->irq_disable, then we need | ||
| 343 | * to use default_shutdown, otherwise the irq line is not | ||
| 344 | * disabled on free_irq(): | ||
| 345 | */ | ||
| 346 | if (!chip->irq_shutdown) | ||
| 347 | chip->irq_shutdown = chip->irq_disable != default_disable ? | ||
| 348 | chip->irq_disable : default_shutdown; | ||
| 349 | |||
| 350 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 351 | if (!chip->end) | 299 | if (!chip->end) |
| 352 | chip->end = dummy_irq_chip.end; | 300 | chip->end = dummy_irq_chip.end; |
| 353 | |||
| 354 | /* | ||
| 355 | * Now fix up the remaining compat handlers | ||
| 356 | */ | ||
| 357 | if (chip->bus_lock) | 301 | if (chip->bus_lock) |
| 358 | chip->irq_bus_lock = compat_bus_lock; | 302 | chip->irq_bus_lock = compat_bus_lock; |
| 359 | if (chip->bus_sync_unlock) | 303 | if (chip->bus_sync_unlock) |
| @@ -388,22 +332,22 @@ static inline void mask_ack_irq(struct irq_desc *desc) | |||
| 388 | if (desc->irq_data.chip->irq_ack) | 332 | if (desc->irq_data.chip->irq_ack) |
| 389 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 333 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
| 390 | } | 334 | } |
| 391 | desc->status |= IRQ_MASKED; | 335 | irq_state_set_masked(desc); |
| 392 | } | 336 | } |
| 393 | 337 | ||
| 394 | static inline void mask_irq(struct irq_desc *desc) | 338 | void mask_irq(struct irq_desc *desc) |
| 395 | { | 339 | { |
| 396 | if (desc->irq_data.chip->irq_mask) { | 340 | if (desc->irq_data.chip->irq_mask) { |
| 397 | desc->irq_data.chip->irq_mask(&desc->irq_data); | 341 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
| 398 | desc->status |= IRQ_MASKED; | 342 | irq_state_set_masked(desc); |
| 399 | } | 343 | } |
| 400 | } | 344 | } |
| 401 | 345 | ||
| 402 | static inline void unmask_irq(struct irq_desc *desc) | 346 | void unmask_irq(struct irq_desc *desc) |
| 403 | { | 347 | { |
| 404 | if (desc->irq_data.chip->irq_unmask) { | 348 | if (desc->irq_data.chip->irq_unmask) { |
| 405 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 349 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
| 406 | desc->status &= ~IRQ_MASKED; | 350 | irq_state_clr_masked(desc); |
| 407 | } | 351 | } |
| 408 | } | 352 | } |
| 409 | 353 | ||
| @@ -428,10 +372,11 @@ void handle_nested_irq(unsigned int irq) | |||
| 428 | kstat_incr_irqs_this_cpu(irq, desc); | 372 | kstat_incr_irqs_this_cpu(irq, desc); |
| 429 | 373 | ||
| 430 | action = desc->action; | 374 | action = desc->action; |
| 431 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) | 375 | if (unlikely(!action || (desc->istate & IRQS_DISABLED))) |
| 432 | goto out_unlock; | 376 | goto out_unlock; |
| 433 | 377 | ||
| 434 | desc->status |= IRQ_INPROGRESS; | 378 | irq_compat_set_progress(desc); |
| 379 | desc->istate |= IRQS_INPROGRESS; | ||
| 435 | raw_spin_unlock_irq(&desc->lock); | 380 | raw_spin_unlock_irq(&desc->lock); |
| 436 | 381 | ||
| 437 | action_ret = action->thread_fn(action->irq, action->dev_id); | 382 | action_ret = action->thread_fn(action->irq, action->dev_id); |
| @@ -439,13 +384,21 @@ void handle_nested_irq(unsigned int irq) | |||
| 439 | note_interrupt(irq, desc, action_ret); | 384 | note_interrupt(irq, desc, action_ret); |
| 440 | 385 | ||
| 441 | raw_spin_lock_irq(&desc->lock); | 386 | raw_spin_lock_irq(&desc->lock); |
| 442 | desc->status &= ~IRQ_INPROGRESS; | 387 | desc->istate &= ~IRQS_INPROGRESS; |
| 388 | irq_compat_clr_progress(desc); | ||
| 443 | 389 | ||
| 444 | out_unlock: | 390 | out_unlock: |
| 445 | raw_spin_unlock_irq(&desc->lock); | 391 | raw_spin_unlock_irq(&desc->lock); |
| 446 | } | 392 | } |
| 447 | EXPORT_SYMBOL_GPL(handle_nested_irq); | 393 | EXPORT_SYMBOL_GPL(handle_nested_irq); |
| 448 | 394 | ||
| 395 | static bool irq_check_poll(struct irq_desc *desc) | ||
| 396 | { | ||
| 397 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) | ||
| 398 | return false; | ||
| 399 | return irq_wait_for_poll(desc); | ||
| 400 | } | ||
| 401 | |||
| 449 | /** | 402 | /** |
| 450 | * handle_simple_irq - Simple and software-decoded IRQs. | 403 | * handle_simple_irq - Simple and software-decoded IRQs. |
| 451 | * @irq: the interrupt number | 404 | * @irq: the interrupt number |
| @@ -461,29 +414,20 @@ EXPORT_SYMBOL_GPL(handle_nested_irq); | |||
| 461 | void | 414 | void |
| 462 | handle_simple_irq(unsigned int irq, struct irq_desc *desc) | 415 | handle_simple_irq(unsigned int irq, struct irq_desc *desc) |
| 463 | { | 416 | { |
| 464 | struct irqaction *action; | ||
| 465 | irqreturn_t action_ret; | ||
| 466 | |||
| 467 | raw_spin_lock(&desc->lock); | 417 | raw_spin_lock(&desc->lock); |
| 468 | 418 | ||
| 469 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 419 | if (unlikely(desc->istate & IRQS_INPROGRESS)) |
| 470 | goto out_unlock; | 420 | if (!irq_check_poll(desc)) |
| 471 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 421 | goto out_unlock; |
| 422 | |||
| 423 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | ||
| 472 | kstat_incr_irqs_this_cpu(irq, desc); | 424 | kstat_incr_irqs_this_cpu(irq, desc); |
| 473 | 425 | ||
| 474 | action = desc->action; | 426 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) |
| 475 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) | ||
| 476 | goto out_unlock; | 427 | goto out_unlock; |
| 477 | 428 | ||
| 478 | desc->status |= IRQ_INPROGRESS; | 429 | handle_irq_event(desc); |
| 479 | raw_spin_unlock(&desc->lock); | ||
| 480 | 430 | ||
| 481 | action_ret = handle_IRQ_event(irq, action); | ||
| 482 | if (!noirqdebug) | ||
| 483 | note_interrupt(irq, desc, action_ret); | ||
| 484 | |||
| 485 | raw_spin_lock(&desc->lock); | ||
| 486 | desc->status &= ~IRQ_INPROGRESS; | ||
| 487 | out_unlock: | 431 | out_unlock: |
| 488 | raw_spin_unlock(&desc->lock); | 432 | raw_spin_unlock(&desc->lock); |
| 489 | } | 433 | } |
| @@ -501,42 +445,42 @@ out_unlock: | |||
| 501 | void | 445 | void |
| 502 | handle_level_irq(unsigned int irq, struct irq_desc *desc) | 446 | handle_level_irq(unsigned int irq, struct irq_desc *desc) |
| 503 | { | 447 | { |
| 504 | struct irqaction *action; | ||
| 505 | irqreturn_t action_ret; | ||
| 506 | |||
| 507 | raw_spin_lock(&desc->lock); | 448 | raw_spin_lock(&desc->lock); |
| 508 | mask_ack_irq(desc); | 449 | mask_ack_irq(desc); |
| 509 | 450 | ||
| 510 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 451 | if (unlikely(desc->istate & IRQS_INPROGRESS)) |
| 511 | goto out_unlock; | 452 | if (!irq_check_poll(desc)) |
| 512 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 453 | goto out_unlock; |
| 454 | |||
| 455 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | ||
| 513 | kstat_incr_irqs_this_cpu(irq, desc); | 456 | kstat_incr_irqs_this_cpu(irq, desc); |
| 514 | 457 | ||
| 515 | /* | 458 | /* |
| 516 | * If it's disabled or no action available | 459 | * If it's disabled or no action available |
| 517 | * keep it masked and get out of here | 460 | * keep it masked and get out of here |
| 518 | */ | 461 | */ |
| 519 | action = desc->action; | 462 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) |
| 520 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) | ||
| 521 | goto out_unlock; | 463 | goto out_unlock; |
| 522 | 464 | ||
| 523 | desc->status |= IRQ_INPROGRESS; | 465 | handle_irq_event(desc); |
| 524 | raw_spin_unlock(&desc->lock); | ||
| 525 | |||
| 526 | action_ret = handle_IRQ_event(irq, action); | ||
| 527 | if (!noirqdebug) | ||
| 528 | note_interrupt(irq, desc, action_ret); | ||
| 529 | 466 | ||
| 530 | raw_spin_lock(&desc->lock); | 467 | if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT))) |
| 531 | desc->status &= ~IRQ_INPROGRESS; | ||
| 532 | |||
| 533 | if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) | ||
| 534 | unmask_irq(desc); | 468 | unmask_irq(desc); |
| 535 | out_unlock: | 469 | out_unlock: |
| 536 | raw_spin_unlock(&desc->lock); | 470 | raw_spin_unlock(&desc->lock); |
| 537 | } | 471 | } |
| 538 | EXPORT_SYMBOL_GPL(handle_level_irq); | 472 | EXPORT_SYMBOL_GPL(handle_level_irq); |
| 539 | 473 | ||
| 474 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | ||
| 475 | static inline void preflow_handler(struct irq_desc *desc) | ||
| 476 | { | ||
| 477 | if (desc->preflow_handler) | ||
| 478 | desc->preflow_handler(&desc->irq_data); | ||
| 479 | } | ||
| 480 | #else | ||
| 481 | static inline void preflow_handler(struct irq_desc *desc) { } | ||
| 482 | #endif | ||
| 483 | |||
| 540 | /** | 484 | /** |
| 541 | * handle_fasteoi_irq - irq handler for transparent controllers | 485 | * handle_fasteoi_irq - irq handler for transparent controllers |
| 542 | * @irq: the interrupt number | 486 | * @irq: the interrupt number |
| @@ -550,42 +494,41 @@ EXPORT_SYMBOL_GPL(handle_level_irq); | |||
| 550 | void | 494 | void |
| 551 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | 495 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) |
| 552 | { | 496 | { |
| 553 | struct irqaction *action; | ||
| 554 | irqreturn_t action_ret; | ||
| 555 | |||
| 556 | raw_spin_lock(&desc->lock); | 497 | raw_spin_lock(&desc->lock); |
| 557 | 498 | ||
| 558 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 499 | if (unlikely(desc->istate & IRQS_INPROGRESS)) |
| 559 | goto out; | 500 | if (!irq_check_poll(desc)) |
| 501 | goto out; | ||
| 560 | 502 | ||
| 561 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 503 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 562 | kstat_incr_irqs_this_cpu(irq, desc); | 504 | kstat_incr_irqs_this_cpu(irq, desc); |
| 563 | 505 | ||
| 564 | /* | 506 | /* |
| 565 | * If it's disabled or no action available | 507 | * If it's disabled or no action available |
| 566 | * then mask it and get out of here: | 508 | * then mask it and get out of here: |
| 567 | */ | 509 | */ |
| 568 | action = desc->action; | 510 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) { |
| 569 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) { | 511 | irq_compat_set_pending(desc); |
| 570 | desc->status |= IRQ_PENDING; | 512 | desc->istate |= IRQS_PENDING; |
| 571 | mask_irq(desc); | 513 | mask_irq(desc); |
| 572 | goto out; | 514 | goto out; |
| 573 | } | 515 | } |
| 574 | 516 | ||
| 575 | desc->status |= IRQ_INPROGRESS; | 517 | if (desc->istate & IRQS_ONESHOT) |
| 576 | desc->status &= ~IRQ_PENDING; | 518 | mask_irq(desc); |
| 577 | raw_spin_unlock(&desc->lock); | ||
| 578 | 519 | ||
| 579 | action_ret = handle_IRQ_event(irq, action); | 520 | preflow_handler(desc); |
| 580 | if (!noirqdebug) | 521 | handle_irq_event(desc); |
| 581 | note_interrupt(irq, desc, action_ret); | ||
| 582 | 522 | ||
| 583 | raw_spin_lock(&desc->lock); | 523 | out_eoi: |
| 584 | desc->status &= ~IRQ_INPROGRESS; | ||
| 585 | out: | ||
| 586 | desc->irq_data.chip->irq_eoi(&desc->irq_data); | 524 | desc->irq_data.chip->irq_eoi(&desc->irq_data); |
| 587 | 525 | out_unlock: | |
| 588 | raw_spin_unlock(&desc->lock); | 526 | raw_spin_unlock(&desc->lock); |
| 527 | return; | ||
| 528 | out: | ||
| 529 | if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED)) | ||
| 530 | goto out_eoi; | ||
| 531 | goto out_unlock; | ||
| 589 | } | 532 | } |
| 590 | 533 | ||
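The restructured exit path keeps the common case fast: a handled interrupt always flows through out_eoi and gets its EOI, while the disabled/no-action bailout only sends an EOI when the chip did not set IRQCHIP_EOI_IF_HANDLED. A chip opts in to the stricter behaviour through its flags field; a sketch, with the callback names hypothetical:

	static struct irq_chip my_fasteoi_chip = {
		.name		= "my-eoi-pic",
		.irq_mask	= my_pic_mask,
		.irq_unmask	= my_pic_unmask,
		.irq_eoi	= my_pic_eoi,
		.flags		= IRQCHIP_EOI_IF_HANDLED,
	};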
| 591 | /** | 534 | /** |
| @@ -609,32 +552,28 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 609 | { | 552 | { |
| 610 | raw_spin_lock(&desc->lock); | 553 | raw_spin_lock(&desc->lock); |
| 611 | 554 | ||
| 612 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 555 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 613 | |||
| 614 | /* | 556 | /* |
| 615 | * If we're currently running this IRQ, or it's disabled, | 557 | * If we're currently running this IRQ, or it's disabled, |
| 616 | * we shouldn't process the IRQ. Mark it pending, handle | 558 | * we shouldn't process the IRQ. Mark it pending, handle |
| 617 | * the necessary masking and go out | 559 | * the necessary masking and go out |
| 618 | */ | 560 | */ |
| 619 | if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || | 561 | if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) || |
| 620 | !desc->action)) { | 562 | !desc->action))) { |
| 621 | desc->status |= (IRQ_PENDING | IRQ_MASKED); | 563 | if (!irq_check_poll(desc)) { |
| 622 | mask_ack_irq(desc); | 564 | irq_compat_set_pending(desc); |
| 623 | goto out_unlock; | 565 | desc->istate |= IRQS_PENDING; |
| 566 | mask_ack_irq(desc); | ||
| 567 | goto out_unlock; | ||
| 568 | } | ||
| 624 | } | 569 | } |
| 625 | kstat_incr_irqs_this_cpu(irq, desc); | 570 | kstat_incr_irqs_this_cpu(irq, desc); |
| 626 | 571 | ||
| 627 | /* Start handling the irq */ | 572 | /* Start handling the irq */ |
| 628 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 573 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
| 629 | 574 | ||
| 630 | /* Mark the IRQ currently in progress.*/ | ||
| 631 | desc->status |= IRQ_INPROGRESS; | ||
| 632 | |||
| 633 | do { | 575 | do { |
| 634 | struct irqaction *action = desc->action; | 576 | if (unlikely(!desc->action)) { |
| 635 | irqreturn_t action_ret; | ||
| 636 | |||
| 637 | if (unlikely(!action)) { | ||
| 638 | mask_irq(desc); | 577 | mask_irq(desc); |
| 639 | goto out_unlock; | 578 | goto out_unlock; |
| 640 | } | 579 | } |
| @@ -644,22 +583,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 644 | * one, we could have masked the irq. | 583 | * one, we could have masked the irq. |
| 645 | * Re-enable it, if it was not disabled in the meantime. | 584 | * Re-enable it, if it was not disabled in the meantime. |
| 646 | */ | 585 | */ |
| 647 | if (unlikely((desc->status & | 586 | if (unlikely(desc->istate & IRQS_PENDING)) { |
| 648 | (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == | 587 | if (!(desc->istate & IRQS_DISABLED) && |
| 649 | (IRQ_PENDING | IRQ_MASKED))) { | 588 | (desc->istate & IRQS_MASKED)) |
| 650 | unmask_irq(desc); | 589 | unmask_irq(desc); |
| 651 | } | 590 | } |
| 652 | 591 | ||
| 653 | desc->status &= ~IRQ_PENDING; | 592 | handle_irq_event(desc); |
| 654 | raw_spin_unlock(&desc->lock); | ||
| 655 | action_ret = handle_IRQ_event(irq, action); | ||
| 656 | if (!noirqdebug) | ||
| 657 | note_interrupt(irq, desc, action_ret); | ||
| 658 | raw_spin_lock(&desc->lock); | ||
| 659 | 593 | ||
| 660 | } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); | 594 | } while ((desc->istate & IRQS_PENDING) && |
| 595 | !(desc->istate & IRQS_DISABLED)); | ||
| 661 | 596 | ||
| 662 | desc->status &= ~IRQ_INPROGRESS; | ||
| 663 | out_unlock: | 597 | out_unlock: |
| 664 | raw_spin_unlock(&desc->lock); | 598 | raw_spin_unlock(&desc->lock); |
| 665 | } | 599 | } |
| @@ -674,103 +608,84 @@ out_unlock: | |||
| 674 | void | 608 | void |
| 675 | handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | 609 | handle_percpu_irq(unsigned int irq, struct irq_desc *desc) |
| 676 | { | 610 | { |
| 677 | irqreturn_t action_ret; | 611 | struct irq_chip *chip = irq_desc_get_chip(desc); |
| 678 | 612 | ||
| 679 | kstat_incr_irqs_this_cpu(irq, desc); | 613 | kstat_incr_irqs_this_cpu(irq, desc); |
| 680 | 614 | ||
| 681 | if (desc->irq_data.chip->irq_ack) | 615 | if (chip->irq_ack) |
| 682 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 616 | chip->irq_ack(&desc->irq_data); |
| 683 | 617 | ||
| 684 | action_ret = handle_IRQ_event(irq, desc->action); | 618 | handle_irq_event_percpu(desc, desc->action); |
| 685 | if (!noirqdebug) | ||
| 686 | note_interrupt(irq, desc, action_ret); | ||
| 687 | 619 | ||
| 688 | if (desc->irq_data.chip->irq_eoi) | 620 | if (chip->irq_eoi) |
| 689 | desc->irq_data.chip->irq_eoi(&desc->irq_data); | 621 | chip->irq_eoi(&desc->irq_data); |
| 690 | } | 622 | } |
| 691 | 623 | ||
| 692 | void | 624 | void |
| 693 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 625 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
| 694 | const char *name) | 626 | const char *name) |
| 695 | { | 627 | { |
| 696 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 697 | unsigned long flags; | 628 | unsigned long flags; |
| 629 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
| 698 | 630 | ||
| 699 | if (!desc) { | 631 | if (!desc) |
| 700 | printk(KERN_ERR | ||
| 701 | "Trying to install type control for IRQ%d\n", irq); | ||
| 702 | return; | 632 | return; |
| 703 | } | ||
| 704 | 633 | ||
| 705 | if (!handle) | 634 | if (!handle) { |
| 706 | handle = handle_bad_irq; | 635 | handle = handle_bad_irq; |
| 707 | else if (desc->irq_data.chip == &no_irq_chip) { | 636 | } else { |
| 708 | printk(KERN_WARNING "Trying to install %sinterrupt handler " | 637 | if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) |
| 709 | "for IRQ%d\n", is_chained ? "chained " : "", irq); | 638 | goto out; |
| 710 | /* | ||
| 711 | * Some ARM implementations install a handler for really dumb | ||
| 712 | * interrupt hardware without setting an irq_chip. This worked | ||
| 713 | * with the ARM no_irq_chip but the check in setup_irq would | ||
| 714 | * prevent us to setup the interrupt at all. Switch it to | ||
| 715 | * dummy_irq_chip for easy transition. | ||
| 716 | */ | ||
| 717 | desc->irq_data.chip = &dummy_irq_chip; | ||
| 718 | } | 639 | } |
| 719 | 640 | ||
| 720 | chip_bus_lock(desc); | ||
| 721 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 722 | |||
| 723 | /* Uninstall? */ | 641 | /* Uninstall? */ |
| 724 | if (handle == handle_bad_irq) { | 642 | if (handle == handle_bad_irq) { |
| 725 | if (desc->irq_data.chip != &no_irq_chip) | 643 | if (desc->irq_data.chip != &no_irq_chip) |
| 726 | mask_ack_irq(desc); | 644 | mask_ack_irq(desc); |
| 727 | desc->status |= IRQ_DISABLED; | 645 | irq_compat_set_disabled(desc); |
| 646 | desc->istate |= IRQS_DISABLED; | ||
| 728 | desc->depth = 1; | 647 | desc->depth = 1; |
| 729 | } | 648 | } |
| 730 | desc->handle_irq = handle; | 649 | desc->handle_irq = handle; |
| 731 | desc->name = name; | 650 | desc->name = name; |
| 732 | 651 | ||
| 733 | if (handle != handle_bad_irq && is_chained) { | 652 | if (handle != handle_bad_irq && is_chained) { |
| 734 | desc->status &= ~IRQ_DISABLED; | 653 | irq_settings_set_noprobe(desc); |
| 735 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; | 654 | irq_settings_set_norequest(desc); |
| 736 | desc->depth = 0; | 655 | irq_startup(desc); |
| 737 | desc->irq_data.chip->irq_startup(&desc->irq_data); | ||
| 738 | } | 656 | } |
| 739 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 657 | out: |
| 740 | chip_bus_sync_unlock(desc); | 658 | irq_put_desc_busunlock(desc, flags); |
| 741 | } | ||
| 742 | EXPORT_SYMBOL_GPL(__set_irq_handler); | ||
| 743 | |||
| 744 | void | ||
| 745 | set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, | ||
| 746 | irq_flow_handler_t handle) | ||
| 747 | { | ||
| 748 | set_irq_chip(irq, chip); | ||
| 749 | __set_irq_handler(irq, handle, 0, NULL); | ||
| 750 | } | 659 | } |
| 660 | EXPORT_SYMBOL_GPL(__irq_set_handler); | ||
| 751 | 661 | ||
| 752 | void | 662 | void |
| 753 | set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | 663 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
| 754 | irq_flow_handler_t handle, const char *name) | 664 | irq_flow_handler_t handle, const char *name) |
| 755 | { | 665 | { |
| 756 | set_irq_chip(irq, chip); | 666 | irq_set_chip(irq, chip); |
| 757 | __set_irq_handler(irq, handle, 0, name); | 667 | __irq_set_handler(irq, handle, 0, name); |
| 758 | } | 668 | } |
| 759 | 669 | ||
| 760 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) | 670 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) |
| 761 | { | 671 | { |
| 762 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 763 | unsigned long flags; | 672 | unsigned long flags; |
| 673 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
| 764 | 674 | ||
| 765 | if (!desc) | 675 | if (!desc) |
| 766 | return; | 676 | return; |
| 677 | irq_settings_clr_and_set(desc, clr, set); | ||
| 678 | |||
| 679 | irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | | ||
| 680 | IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); | ||
| 681 | if (irq_settings_has_no_balance_set(desc)) | ||
| 682 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | ||
| 683 | if (irq_settings_is_per_cpu(desc)) | ||
| 684 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | ||
| 685 | if (irq_settings_can_move_pcntxt(desc)) | ||
| 686 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); | ||
| 767 | 687 | ||
| 768 | /* Sanitize flags */ | 688 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); |
| 769 | set &= IRQF_MODIFY_MASK; | ||
| 770 | clr &= IRQF_MODIFY_MASK; | ||
| 771 | 689 | ||
| 772 | raw_spin_lock_irqsave(&desc->lock, flags); | 690 | irq_put_desc_unlock(desc, flags); |
| 773 | desc->status &= ~clr; | ||
| 774 | desc->status |= set; | ||
| 775 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 776 | } | 691 | } |
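irq_modify_status() now splits the caller-supplied bits between the per-descriptor settings and the externally visible irq_data state bits, keeping the two views coherent; the old "Sanitize flags" masking against IRQF_MODIFY_MASK moves into irq_settings_clr_and_set(), which lives in the new settings.h pulled in via internals.h and is not part of this excerpt. Callers are unchanged; a sketch of the usual pattern, with irq_set_status_flags() assumed to be the thin set-only wrapper from the same series:

	/* mark an irq as not probeable and per-cpu */
	irq_modify_status(irq, 0, IRQ_NOPROBE | IRQ_PER_CPU);

	/* equivalent set-only shorthand, assuming the wrapper exists */
	irq_set_status_flags(irq, IRQ_NOPROBE | IRQ_PER_CPU);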
diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h new file mode 100644 index 000000000000..6bbaf66aca85 --- /dev/null +++ b/kernel/irq/compat.h | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | /* | ||
| 2 | * Compat layer for transition period | ||
| 3 | */ | ||
| 4 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 5 | static inline void irq_compat_set_progress(struct irq_desc *desc) | ||
| 6 | { | ||
| 7 | desc->status |= IRQ_INPROGRESS; | ||
| 8 | } | ||
| 9 | |||
| 10 | static inline void irq_compat_clr_progress(struct irq_desc *desc) | ||
| 11 | { | ||
| 12 | desc->status &= ~IRQ_INPROGRESS; | ||
| 13 | } | ||
| 14 | static inline void irq_compat_set_disabled(struct irq_desc *desc) | ||
| 15 | { | ||
| 16 | desc->status |= IRQ_DISABLED; | ||
| 17 | } | ||
| 18 | static inline void irq_compat_clr_disabled(struct irq_desc *desc) | ||
| 19 | { | ||
| 20 | desc->status &= ~IRQ_DISABLED; | ||
| 21 | } | ||
| 22 | static inline void irq_compat_set_pending(struct irq_desc *desc) | ||
| 23 | { | ||
| 24 | desc->status |= IRQ_PENDING; | ||
| 25 | } | ||
| 26 | |||
| 27 | static inline void irq_compat_clr_pending(struct irq_desc *desc) | ||
| 28 | { | ||
| 29 | desc->status &= ~IRQ_PENDING; | ||
| 30 | } | ||
| 31 | static inline void irq_compat_set_masked(struct irq_desc *desc) | ||
| 32 | { | ||
| 33 | desc->status |= IRQ_MASKED; | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline void irq_compat_clr_masked(struct irq_desc *desc) | ||
| 37 | { | ||
| 38 | desc->status &= ~IRQ_MASKED; | ||
| 39 | } | ||
| 40 | static inline void irq_compat_set_move_pending(struct irq_desc *desc) | ||
| 41 | { | ||
| 42 | desc->status |= IRQ_MOVE_PENDING; | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline void irq_compat_clr_move_pending(struct irq_desc *desc) | ||
| 46 | { | ||
| 47 | desc->status &= ~IRQ_MOVE_PENDING; | ||
| 48 | } | ||
| 49 | static inline void irq_compat_set_affinity(struct irq_desc *desc) | ||
| 50 | { | ||
| 51 | desc->status |= IRQ_AFFINITY_SET; | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline void irq_compat_clr_affinity(struct irq_desc *desc) | ||
| 55 | { | ||
| 56 | desc->status &= ~IRQ_AFFINITY_SET; | ||
| 57 | } | ||
| 58 | #else | ||
| 59 | static inline void irq_compat_set_progress(struct irq_desc *desc) { } | ||
| 60 | static inline void irq_compat_clr_progress(struct irq_desc *desc) { } | ||
| 61 | static inline void irq_compat_set_disabled(struct irq_desc *desc) { } | ||
| 62 | static inline void irq_compat_clr_disabled(struct irq_desc *desc) { } | ||
| 63 | static inline void irq_compat_set_pending(struct irq_desc *desc) { } | ||
| 64 | static inline void irq_compat_clr_pending(struct irq_desc *desc) { } | ||
| 65 | static inline void irq_compat_set_masked(struct irq_desc *desc) { } | ||
| 66 | static inline void irq_compat_clr_masked(struct irq_desc *desc) { } | ||
| 67 | static inline void irq_compat_set_move_pending(struct irq_desc *desc) { } | ||
| 68 | static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { } | ||
| 69 | static inline void irq_compat_set_affinity(struct irq_desc *desc) { } | ||
| 70 | static inline void irq_compat_clr_affinity(struct irq_desc *desc) { } | ||
| 71 | #endif | ||
| 72 | |||
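These helpers exist only so that code still reading desc->status keeps seeing the familiar IRQ_* bits while the authoritative state migrates into desc->istate; with CONFIG_GENERIC_HARDIRQS_NO_COMPAT they compile to nothing. Every core-state transition therefore comes in pairs, exactly as in the flow handlers above:

	/* the transition pattern used throughout the core */
	irq_compat_set_pending(desc);	/* mirror into desc->status */
	desc->istate |= IRQS_PENDING;	/* authoritative core state */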
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h new file mode 100644 index 000000000000..d1a33b7fa61d --- /dev/null +++ b/kernel/irq/debug.h | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | /* | ||
| 2 | * Debugging printout: | ||
| 3 | */ | ||
| 4 | |||
| 5 | #include <linux/kallsyms.h> | ||
| 6 | |||
| 7 | #define P(f) if (desc->status & f) printk("%14s set\n", #f) | ||
| 8 | #define PS(f) if (desc->istate & f) printk("%14s set\n", #f) | ||
| 9 | |||
| 10 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
| 11 | { | ||
| 12 | printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", | ||
| 13 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); | ||
| 14 | printk("->handle_irq(): %p, ", desc->handle_irq); | ||
| 15 | print_symbol("%s\n", (unsigned long)desc->handle_irq); | ||
| 16 | printk("->irq_data.chip(): %p, ", desc->irq_data.chip); | ||
| 17 | print_symbol("%s\n", (unsigned long)desc->irq_data.chip); | ||
| 18 | printk("->action(): %p\n", desc->action); | ||
| 19 | if (desc->action) { | ||
| 20 | printk("->action->handler(): %p, ", desc->action->handler); | ||
| 21 | print_symbol("%s\n", (unsigned long)desc->action->handler); | ||
| 22 | } | ||
| 23 | |||
| 24 | P(IRQ_LEVEL); | ||
| 25 | P(IRQ_PER_CPU); | ||
| 26 | P(IRQ_NOPROBE); | ||
| 27 | P(IRQ_NOREQUEST); | ||
| 28 | P(IRQ_NOAUTOEN); | ||
| 29 | |||
| 30 | PS(IRQS_AUTODETECT); | ||
| 31 | PS(IRQS_INPROGRESS); | ||
| 32 | PS(IRQS_REPLAY); | ||
| 33 | PS(IRQS_WAITING); | ||
| 34 | PS(IRQS_DISABLED); | ||
| 35 | PS(IRQS_PENDING); | ||
| 36 | PS(IRQS_MASKED); | ||
| 37 | } | ||
| 38 | |||
| 39 | #undef P | ||
| 40 | #undef PS | ||
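The stringizing P()/PS() macros print the name of every flag that is set, with P() inspecting the public desc->status bits and PS() the internal desc->istate bits. Expanded, a single PS() line is nothing more than:

	if (desc->istate & IRQS_PENDING)
		printk("%14s set\n", "IRQS_PENDING");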
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 3540a7190122..517561fc7317 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -51,30 +51,92 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action) | |||
| 51 | "but no thread function available.", irq, action->name); | 51 | "but no thread function available.", irq, action->name); |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | /** | 54 | static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action) |
| 55 | * handle_IRQ_event - irq action chain handler | 55 | { |
| 56 | * @irq: the interrupt number | 56 | /* |
| 57 | * @action: the interrupt action chain for this irq | 57 | * Wake up the handler thread for this action. In case the |
| 58 | * | 58 | * thread crashed and was killed, we just pretend that we |
| 59 | * Handles the action chain of an irq event | 59 | * handled the interrupt. The hardirq handler has disabled the |
| 60 | */ | 60 | * device interrupt, so no irq storm is lurking. If the |
| 61 | irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | 61 | * RUNTHREAD bit is already set, nothing to do. |
| 62 | */ | ||
| 63 | if (test_bit(IRQTF_DIED, &action->thread_flags) || | ||
| 64 | test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | ||
| 65 | return; | ||
| 66 | |||
| 67 | /* | ||
| 68 | * It's safe to OR the mask lockless here. We have only two | ||
| 69 | * places which write to threads_oneshot: This code and the | ||
| 70 | * irq thread. | ||
| 71 | * | ||
| 72 | * This code runs in hard irq context and can never run on two | ||
| 73 | * cpus in parallel. If it ever does, we have more serious | ||
| 74 | * problems than this bitmask. | ||
| 75 | * | ||
| 76 | * The irq threads of this irq which clear their "running" bit | ||
| 77 | * in threads_oneshot are serialized via desc->lock against | ||
| 78 | * each other and they are serialized against this code by | ||
| 79 | * IRQS_INPROGRESS. | ||
| 80 | * | ||
| 81 | * Hard irq handler: | ||
| 82 | * | ||
| 83 | * spin_lock(desc->lock); | ||
| 84 | * desc->state |= IRQS_INPROGRESS; | ||
| 85 | * spin_unlock(desc->lock); | ||
| 86 | * set_bit(IRQTF_RUNTHREAD, &action->thread_flags); | ||
| 87 | * desc->threads_oneshot |= mask; | ||
| 88 | * spin_lock(desc->lock); | ||
| 89 | * desc->state &= ~IRQS_INPROGRESS; | ||
| 90 | * spin_unlock(desc->lock); | ||
| 91 | * | ||
| 92 | * irq thread: | ||
| 93 | * | ||
| 94 | * again: | ||
| 95 | * spin_lock(desc->lock); | ||
| 96 | * if (desc->state & IRQS_INPROGRESS) { | ||
| 97 | * spin_unlock(desc->lock); | ||
| 98 | * while(desc->state & IRQS_INPROGRESS) | ||
| 99 | * cpu_relax(); | ||
| 100 | * goto again; | ||
| 101 | * } | ||
| 102 | * if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | ||
| 103 | * desc->threads_oneshot &= ~mask; | ||
| 104 | * spin_unlock(desc->lock); | ||
| 105 | * | ||
| 106 | * So either the thread waits for us to clear IRQS_INPROGRESS | ||
| 107 | * or we are waiting in the flow handler for desc->lock to be | ||
| 108 | * released before we reach this point. The thread also checks | ||
| 109 | * IRQTF_RUNTHREAD under desc->lock. If set it leaves | ||
| 110 | * threads_oneshot untouched and runs the thread another time. | ||
| 111 | */ | ||
| 112 | desc->threads_oneshot |= action->thread_mask; | ||
| 113 | wake_up_process(action->thread); | ||
| 114 | } | ||
| 115 | |||
| 116 | irqreturn_t | ||
| 117 | handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) | ||
| 62 | { | 118 | { |
| 63 | irqreturn_t ret, retval = IRQ_NONE; | 119 | irqreturn_t retval = IRQ_NONE; |
| 64 | unsigned int status = 0; | 120 | unsigned int random = 0, irq = desc->irq_data.irq; |
| 65 | 121 | ||
| 66 | do { | 122 | do { |
| 123 | irqreturn_t res; | ||
| 124 | |||
| 67 | trace_irq_handler_entry(irq, action); | 125 | trace_irq_handler_entry(irq, action); |
| 68 | ret = action->handler(irq, action->dev_id); | 126 | res = action->handler(irq, action->dev_id); |
| 69 | trace_irq_handler_exit(irq, action, ret); | 127 | trace_irq_handler_exit(irq, action, res); |
| 70 | 128 | ||
| 71 | switch (ret) { | 129 | if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pF enabled interrupts\n", |
| 130 | irq, action->handler)) | ||
| 131 | local_irq_disable(); | ||
| 132 | |||
| 133 | switch (res) { | ||
| 72 | case IRQ_WAKE_THREAD: | 134 | case IRQ_WAKE_THREAD: |
| 73 | /* | 135 | /* |
| 74 | * Set result to handled so the spurious check | 136 | * Set result to handled so the spurious check |
| 75 | * does not trigger. | 137 | * does not trigger. |
| 76 | */ | 138 | */ |
| 77 | ret = IRQ_HANDLED; | 139 | res = IRQ_HANDLED; |
| 78 | 140 | ||
| 79 | /* | 141 | /* |
| 80 | * Catch drivers which return WAKE_THREAD but | 142 | * Catch drivers which return WAKE_THREAD but |
| @@ -85,36 +147,56 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | |||
| 85 | break; | 147 | break; |
| 86 | } | 148 | } |
| 87 | 149 | ||
| 88 | /* | 150 | irq_wake_thread(desc, action); |
| 89 | * Wake up the handler thread for this | ||
| 90 | * action. In case the thread crashed and was | ||
| 91 | * killed we just pretend that we handled the | ||
| 92 | * interrupt. The hardirq handler above has | ||
| 93 | * disabled the device interrupt, so no irq | ||
| 94 | * storm is lurking. | ||
| 95 | */ | ||
| 96 | if (likely(!test_bit(IRQTF_DIED, | ||
| 97 | &action->thread_flags))) { | ||
| 98 | set_bit(IRQTF_RUNTHREAD, &action->thread_flags); | ||
| 99 | wake_up_process(action->thread); | ||
| 100 | } | ||
| 101 | 151 | ||
| 102 | /* Fall through to add to randomness */ | 152 | /* Fall through to add to randomness */ |
| 103 | case IRQ_HANDLED: | 153 | case IRQ_HANDLED: |
| 104 | status |= action->flags; | 154 | random |= action->flags; |
| 105 | break; | 155 | break; |
| 106 | 156 | ||
| 107 | default: | 157 | default: |
| 108 | break; | 158 | break; |
| 109 | } | 159 | } |
| 110 | 160 | ||
| 111 | retval |= ret; | 161 | retval |= res; |
| 112 | action = action->next; | 162 | action = action->next; |
| 113 | } while (action); | 163 | } while (action); |
| 114 | 164 | ||
| 115 | if (status & IRQF_SAMPLE_RANDOM) | 165 | if (random & IRQF_SAMPLE_RANDOM) |
| 116 | add_interrupt_randomness(irq); | 166 | add_interrupt_randomness(irq); |
| 117 | local_irq_disable(); | ||
| 118 | 167 | ||
| 168 | if (!noirqdebug) | ||
| 169 | note_interrupt(irq, desc, retval); | ||
| 119 | return retval; | 170 | return retval; |
| 120 | } | 171 | } |
| 172 | |||
| 173 | irqreturn_t handle_irq_event(struct irq_desc *desc) | ||
| 174 | { | ||
| 175 | struct irqaction *action = desc->action; | ||
| 176 | irqreturn_t ret; | ||
| 177 | |||
| 178 | irq_compat_clr_pending(desc); | ||
| 179 | desc->istate &= ~IRQS_PENDING; | ||
| 180 | irq_compat_set_progress(desc); | ||
| 181 | desc->istate |= IRQS_INPROGRESS; | ||
| 182 | raw_spin_unlock(&desc->lock); | ||
| 183 | |||
| 184 | ret = handle_irq_event_percpu(desc, action); | ||
| 185 | |||
| 186 | raw_spin_lock(&desc->lock); | ||
| 187 | desc->istate &= ~IRQS_INPROGRESS; | ||
| 188 | irq_compat_clr_progress(desc); | ||
| 189 | return ret; | ||
| 190 | } | ||
| 191 | |||
| 192 | /** | ||
| 193 | * handle_IRQ_event - irq action chain handler | ||
| 194 | * @irq: the interrupt number | ||
| 195 | * @action: the interrupt action chain for this irq | ||
| 196 | * | ||
| 197 | * Handles the action chain of an irq event | ||
| 198 | */ | ||
| 199 | irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | ||
| 200 | { | ||
| 201 | return handle_irq_event_percpu(irq_to_desc(irq), action); | ||
| 202 | } | ||
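handle_irq_event_percpu() is the point where a primary handler's IRQ_WAKE_THREAD return value is turned into an irq_wake_thread() call and the threads_oneshot bookkeeping above. For context, a sketch of the driver side that feeds this path; all mydev_* names are hypothetical:

	static irqreturn_t mydev_primary(int irq, void *dev_id)
	{
		if (!mydev_irq_pending(dev_id))
			return IRQ_NONE;	/* not ours */
		mydev_mask_source(dev_id);	/* quiesce the device */
		return IRQ_WAKE_THREAD;		/* -> irq_wake_thread() */
	}

	static irqreturn_t mydev_thread(int irq, void *dev_id)
	{
		mydev_process(dev_id);		/* may sleep */
		mydev_unmask_source(dev_id);
		return IRQ_HANDLED;
	}

	err = request_threaded_irq(irq, mydev_primary, mydev_thread,
				   0, "mydev", dev);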
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 99c3bc8a6fb4..6c6ec9a49027 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -1,5 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * IRQ subsystem internal functions and variables: | 2 | * IRQ subsystem internal functions and variables: |
| 3 | * | ||
| 4 | * Never include this file from anything other than | ||
| 5 | * kernel/irq/. Do not even think about using any information from | ||
| 6 | * this file in your non-core code. | ||
| 3 | */ | 7 | */ |
| 4 | #include <linux/irqdesc.h> | 8 | #include <linux/irqdesc.h> |
| 5 | 9 | ||
| @@ -9,25 +13,89 @@ | |||
| 9 | # define IRQ_BITMAP_BITS NR_IRQS | 13 | # define IRQ_BITMAP_BITS NR_IRQS |
| 10 | #endif | 14 | #endif |
| 11 | 15 | ||
| 16 | #define istate core_internal_state__do_not_mess_with_it | ||
| 17 | |||
| 18 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 19 | # define status status_use_accessors | ||
| 20 | #endif | ||
| 21 | |||
| 12 | extern int noirqdebug; | 22 | extern int noirqdebug; |
| 13 | 23 | ||
| 24 | /* | ||
| 25 | * Bits used by threaded handlers: | ||
| 26 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | ||
| 27 | * IRQTF_DIED - handler thread died | ||
| 28 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed | ||
| 29 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity | ||
| 30 | * IRQTF_FORCED_THREAD - irq action is force threaded | ||
| 31 | */ | ||
| 32 | enum { | ||
| 33 | IRQTF_RUNTHREAD, | ||
| 34 | IRQTF_DIED, | ||
| 35 | IRQTF_WARNED, | ||
| 36 | IRQTF_AFFINITY, | ||
| 37 | IRQTF_FORCED_THREAD, | ||
| 38 | }; | ||
| 39 | |||
| 40 | /* | ||
| 41 | * Bit masks for desc->state | ||
| 42 | * | ||
| 43 | * IRQS_AUTODETECT - autodetection in progress | ||
| 44 | * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt | ||
| 45 | * detection | ||
| 46 | * IRQS_POLL_INPROGRESS - polling in progress | ||
| 47 | * IRQS_INPROGRESS - Interrupt in progress | ||
| 48 | * IRQS_ONESHOT - irq is not unmasked in primary handler | ||
| 49 | * IRQS_REPLAY - irq is replayed | ||
| 50 | * IRQS_WAITING - irq is waiting | ||
| 51 | * IRQS_DISABLED - irq is disabled | ||
| 52 | * IRQS_PENDING - irq is pending and replayed later | ||
| 53 | * IRQS_MASKED - irq is masked | ||
| 54 | * IRQS_SUSPENDED - irq is suspended | ||
| 55 | */ | ||
| 56 | enum { | ||
| 57 | IRQS_AUTODETECT = 0x00000001, | ||
| 58 | IRQS_SPURIOUS_DISABLED = 0x00000002, | ||
| 59 | IRQS_POLL_INPROGRESS = 0x00000008, | ||
| 60 | IRQS_INPROGRESS = 0x00000010, | ||
| 61 | IRQS_ONESHOT = 0x00000020, | ||
| 62 | IRQS_REPLAY = 0x00000040, | ||
| 63 | IRQS_WAITING = 0x00000080, | ||
| 64 | IRQS_DISABLED = 0x00000100, | ||
| 65 | IRQS_PENDING = 0x00000200, | ||
| 66 | IRQS_MASKED = 0x00000400, | ||
| 67 | IRQS_SUSPENDED = 0x00000800, | ||
| 68 | }; | ||
| 69 | |||
| 70 | #include "compat.h" | ||
| 71 | #include "debug.h" | ||
| 72 | #include "settings.h" | ||
| 73 | |||
| 14 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) | 74 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) |
| 15 | 75 | ||
| 16 | /* Set default functions for irq_chip structures: */ | 76 | /* Set default functions for irq_chip structures: */ |
| 17 | extern void irq_chip_set_defaults(struct irq_chip *chip); | 77 | extern void irq_chip_set_defaults(struct irq_chip *chip); |
| 18 | 78 | ||
| 19 | /* Set default handler: */ | ||
| 20 | extern void compat_irq_chip_set_default_handler(struct irq_desc *desc); | ||
| 21 | |||
| 22 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 79 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
| 23 | unsigned long flags); | 80 | unsigned long flags); |
| 24 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); | 81 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); |
| 25 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); | 82 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); |
| 26 | 83 | ||
| 84 | extern int irq_startup(struct irq_desc *desc); | ||
| 85 | extern void irq_shutdown(struct irq_desc *desc); | ||
| 86 | extern void irq_enable(struct irq_desc *desc); | ||
| 87 | extern void irq_disable(struct irq_desc *desc); | ||
| 88 | extern void mask_irq(struct irq_desc *desc); | ||
| 89 | extern void unmask_irq(struct irq_desc *desc); | ||
| 90 | |||
| 27 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | 91 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
| 28 | 92 | ||
| 93 | irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); | ||
| 94 | irqreturn_t handle_irq_event(struct irq_desc *desc); | ||
| 95 | |||
| 29 | /* Resending of interrupts: */ | 96 | /* Resending of interrupts: */ |
| 30 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | 97 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); |
| 98 | bool irq_wait_for_poll(struct irq_desc *desc); | ||
| 31 | 99 | ||
| 32 | #ifdef CONFIG_PROC_FS | 100 | #ifdef CONFIG_PROC_FS |
| 33 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); | 101 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); |
| @@ -43,20 +111,10 @@ static inline void unregister_handler_proc(unsigned int irq, | |||
| 43 | struct irqaction *action) { } | 111 | struct irqaction *action) { } |
| 44 | #endif | 112 | #endif |
| 45 | 113 | ||
| 46 | extern int irq_select_affinity_usr(unsigned int irq); | 114 | extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); |
| 47 | 115 | ||
| 48 | extern void irq_set_thread_affinity(struct irq_desc *desc); | 116 | extern void irq_set_thread_affinity(struct irq_desc *desc); |
| 49 | 117 | ||
| 50 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 51 | static inline void irq_end(unsigned int irq, struct irq_desc *desc) | ||
| 52 | { | ||
| 53 | if (desc->irq_data.chip && desc->irq_data.chip->end) | ||
| 54 | desc->irq_data.chip->end(irq); | ||
| 55 | } | ||
| 56 | #else | ||
| 57 | static inline void irq_end(unsigned int irq, struct irq_desc *desc) { } | ||
| 58 | #endif | ||
| 59 | |||
| 60 | /* Inline functions for support of irq chips on slow busses */ | 118 | /* Inline functions for support of irq chips on slow busses */ |
| 61 | static inline void chip_bus_lock(struct irq_desc *desc) | 119 | static inline void chip_bus_lock(struct irq_desc *desc) |
| 62 | { | 120 | { |
| @@ -70,43 +128,60 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc) | |||
| 70 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); | 128 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); |
| 71 | } | 129 | } |
| 72 | 130 | ||
| 131 | struct irq_desc * | ||
| 132 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus); | ||
| 133 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); | ||
| 134 | |||
| 135 | static inline struct irq_desc * | ||
| 136 | irq_get_desc_buslock(unsigned int irq, unsigned long *flags) | ||
| 137 | { | ||
| 138 | return __irq_get_desc_lock(irq, flags, true); | ||
| 139 | } | ||
| 140 | |||
| 141 | static inline void | ||
| 142 | irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags) | ||
| 143 | { | ||
| 144 | __irq_put_desc_unlock(desc, flags, true); | ||
| 145 | } | ||
| 146 | |||
| 147 | static inline struct irq_desc * | ||
| 148 | irq_get_desc_lock(unsigned int irq, unsigned long *flags) | ||
| 149 | { | ||
| 150 | return __irq_get_desc_lock(irq, flags, false); | ||
| 151 | } | ||
| 152 | |||
| 153 | static inline void | ||
| 154 | irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags) | ||
| 155 | { | ||
| 156 | __irq_put_desc_unlock(desc, flags, false); | ||
| 157 | } | ||
| 158 | |||
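The buslock variants additionally take chip_bus_lock() around the descriptor lock, for chips sitting on slow busses. The intended usage is strictly paired, exactly as in the reworked irq_set_irq_type() above:

	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return -EINVAL;
	/* ... modify the descriptor under desc->lock ... */
	irq_put_desc_busunlock(desc, flags);
	return 0;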
| 73 | /* | 159 | /* |
| 74 | * Debugging printout: | 160 | * Manipulation functions for irq_data.state |
| 75 | */ | 161 | */ |
| 162 | static inline void irqd_set_move_pending(struct irq_data *d) | ||
| 163 | { | ||
| 164 | d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; | ||
| 165 | irq_compat_set_move_pending(irq_data_to_desc(d)); | ||
| 166 | } | ||
| 76 | 167 | ||
| 77 | #include <linux/kallsyms.h> | 168 | static inline void irqd_clr_move_pending(struct irq_data *d) |
| 78 | 169 | { | |
| 79 | #define P(f) if (desc->status & f) printk("%14s set\n", #f) | 170 | d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; |
| 171 | irq_compat_clr_move_pending(irq_data_to_desc(d)); | ||
| 172 | } | ||
| 80 | 173 | ||
| 81 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | 174 | static inline void irqd_clear(struct irq_data *d, unsigned int mask) |
| 82 | { | 175 | { |
| 83 | printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", | 176 | d->state_use_accessors &= ~mask; |
| 84 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); | ||
| 85 | printk("->handle_irq(): %p, ", desc->handle_irq); | ||
| 86 | print_symbol("%s\n", (unsigned long)desc->handle_irq); | ||
| 87 | printk("->irq_data.chip(): %p, ", desc->irq_data.chip); | ||
| 88 | print_symbol("%s\n", (unsigned long)desc->irq_data.chip); | ||
| 89 | printk("->action(): %p\n", desc->action); | ||
| 90 | if (desc->action) { | ||
| 91 | printk("->action->handler(): %p, ", desc->action->handler); | ||
| 92 | print_symbol("%s\n", (unsigned long)desc->action->handler); | ||
| 93 | } | ||
| 94 | |||
| 95 | P(IRQ_INPROGRESS); | ||
| 96 | P(IRQ_DISABLED); | ||
| 97 | P(IRQ_PENDING); | ||
| 98 | P(IRQ_REPLAY); | ||
| 99 | P(IRQ_AUTODETECT); | ||
| 100 | P(IRQ_WAITING); | ||
| 101 | P(IRQ_LEVEL); | ||
| 102 | P(IRQ_MASKED); | ||
| 103 | #ifdef CONFIG_IRQ_PER_CPU | ||
| 104 | P(IRQ_PER_CPU); | ||
| 105 | #endif | ||
| 106 | P(IRQ_NOPROBE); | ||
| 107 | P(IRQ_NOREQUEST); | ||
| 108 | P(IRQ_NOAUTOEN); | ||
| 109 | } | 177 | } |
| 110 | 178 | ||
| 111 | #undef P | 179 | static inline void irqd_set(struct irq_data *d, unsigned int mask) |
| 180 | { | ||
| 181 | d->state_use_accessors |= mask; | ||
| 182 | } | ||
| 112 | 183 | ||
| 184 | static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) | ||
| 185 | { | ||
| 186 | return d->state_use_accessors & mask; | ||
| 187 | } | ||
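These accessors are core-internal: the awkward state_use_accessors name is a deliberate hint that nothing should poke the field directly. A sketch of the pattern, using irq_get_irq_data() from linux/irq.h; locking is elided for brevity and the helper name is hypothetical:

	static void mark_user_affinity(unsigned int irq)
	{
		struct irq_data *d = irq_get_irq_data(irq);

		/* skip lines excluded from balancing, then record the setting */
		if (d && !irqd_has_set(d, IRQD_NO_BALANCING))
			irqd_set(d, IRQD_AFFINITY_SET);
	}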
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 2039bea31bdf..dbccc799407f 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
| @@ -79,7 +79,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) | |||
| 79 | desc->irq_data.chip_data = NULL; | 79 | desc->irq_data.chip_data = NULL; |
| 80 | desc->irq_data.handler_data = NULL; | 80 | desc->irq_data.handler_data = NULL; |
| 81 | desc->irq_data.msi_desc = NULL; | 81 | desc->irq_data.msi_desc = NULL; |
| 82 | desc->status = IRQ_DEFAULT_INIT_FLAGS; | 82 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); |
| 83 | desc->istate = IRQS_DISABLED; | ||
| 83 | desc->handle_irq = handle_bad_irq; | 84 | desc->handle_irq = handle_bad_irq; |
| 84 | desc->depth = 1; | 85 | desc->depth = 1; |
| 85 | desc->irq_count = 0; | 86 | desc->irq_count = 0; |
| @@ -206,6 +207,14 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) | |||
| 206 | return NULL; | 207 | return NULL; |
| 207 | } | 208 | } |
| 208 | 209 | ||
| 210 | static int irq_expand_nr_irqs(unsigned int nr) | ||
| 211 | { | ||
| 212 | if (nr > IRQ_BITMAP_BITS) | ||
| 213 | return -ENOMEM; | ||
| 214 | nr_irqs = nr; | ||
| 215 | return 0; | ||
| 216 | } | ||
| 217 | |||
| 209 | int __init early_irq_init(void) | 218 | int __init early_irq_init(void) |
| 210 | { | 219 | { |
| 211 | int i, initcnt, node = first_online_node; | 220 | int i, initcnt, node = first_online_node; |
| @@ -238,7 +247,7 @@ int __init early_irq_init(void) | |||
| 238 | 247 | ||
| 239 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | 248 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { |
| 240 | [0 ... NR_IRQS-1] = { | 249 | [0 ... NR_IRQS-1] = { |
| 241 | .status = IRQ_DEFAULT_INIT_FLAGS, | 250 | .istate = IRQS_DISABLED, |
| 242 | .handle_irq = handle_bad_irq, | 251 | .handle_irq = handle_bad_irq, |
| 243 | .depth = 1, | 252 | .depth = 1, |
| 244 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), | 253 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), |
| @@ -260,8 +269,8 @@ int __init early_irq_init(void) | |||
| 260 | for (i = 0; i < count; i++) { | 269 | for (i = 0; i < count; i++) { |
| 261 | desc[i].irq_data.irq = i; | 270 | desc[i].irq_data.irq = i; |
| 262 | desc[i].irq_data.chip = &no_irq_chip; | 271 | desc[i].irq_data.chip = &no_irq_chip; |
| 263 | /* TODO : do this allocation on-demand ... */ | ||
| 264 | desc[i].kstat_irqs = alloc_percpu(unsigned int); | 272 | desc[i].kstat_irqs = alloc_percpu(unsigned int); |
| 273 | irq_settings_clr_and_set(desc + i, ~0, _IRQ_DEFAULT_INIT_FLAGS); | ||
| 265 | alloc_masks(desc + i, GFP_KERNEL, node); | 274 | alloc_masks(desc + i, GFP_KERNEL, node); |
| 266 | desc_smp_init(desc + i, node); | 275 | desc_smp_init(desc + i, node); |
| 267 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 276 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
| @@ -286,24 +295,14 @@ static void free_desc(unsigned int irq) | |||
| 286 | 295 | ||
| 287 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) | 296 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) |
| 288 | { | 297 | { |
| 289 | #if defined(CONFIG_KSTAT_IRQS_ONDEMAND) | ||
| 290 | struct irq_desc *desc; | ||
| 291 | unsigned int i; | ||
| 292 | |||
| 293 | for (i = 0; i < cnt; i++) { | ||
| 294 | desc = irq_to_desc(start + i); | ||
| 295 | if (desc && !desc->kstat_irqs) { | ||
| 296 | unsigned int __percpu *stats = alloc_percpu(unsigned int); | ||
| 297 | |||
| 298 | if (!stats) | ||
| 299 | return -1; | ||
| 300 | if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL) | ||
| 301 | free_percpu(stats); | ||
| 302 | } | ||
| 303 | } | ||
| 304 | #endif | ||
| 305 | return start; | 298 | return start; |
| 306 | } | 299 | } |
| 300 | |||
| 301 | static int irq_expand_nr_irqs(unsigned int nr) | ||
| 302 | { | ||
| 303 | return -ENOMEM; | ||
| 304 | } | ||
| 305 | |||
| 307 | #endif /* !CONFIG_SPARSE_IRQ */ | 306 | #endif /* !CONFIG_SPARSE_IRQ */ |
| 308 | 307 | ||
| 309 | /* Dynamic interrupt handling */ | 308 | /* Dynamic interrupt handling */ |
| @@ -347,14 +346,17 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) | |||
| 347 | 346 | ||
| 348 | mutex_lock(&sparse_irq_lock); | 347 | mutex_lock(&sparse_irq_lock); |
| 349 | 348 | ||
| 350 | start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); | 349 | start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS, |
| 350 | from, cnt, 0); | ||
| 351 | ret = -EEXIST; | 351 | ret = -EEXIST; |
| 352 | if (irq >= 0 && start != irq) | 352 | if (irq >= 0 && start != irq) |
| 353 | goto err; | 353 | goto err; |
| 354 | 354 | ||
| 355 | ret = -ENOMEM; | 355 | if (start + cnt > nr_irqs) { |
| 356 | if (start >= nr_irqs) | 356 | ret = irq_expand_nr_irqs(start + cnt); |
| 357 | goto err; | 357 | if (ret) |
| 358 | goto err; | ||
| 359 | } | ||
| 358 | 360 | ||
| 359 | bitmap_set(allocated_irqs, start, cnt); | 361 | bitmap_set(allocated_irqs, start, cnt); |
| 360 | mutex_unlock(&sparse_irq_lock); | 362 | mutex_unlock(&sparse_irq_lock); |
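Since the bitmap is now searched over IRQ_BITMAP_BITS and nr_irqs grows on demand, an allocation may land above the old nr_irqs limit. A hedged caller sketch; the surrounding function and the chosen numbers are hypothetical:

	static int reserve_irq_block(void)
	{
		/* first fit of 8 consecutive irq numbers at or above 64 */
		int base = irq_alloc_descs(-1, 64, 8, first_online_node);

		if (base < 0)
			return base;	/* -EEXIST or -ENOMEM from the paths above */
		/* base..base+7 are reserved; release with irq_free_descs(base, 8) */
		return base;
	}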
| @@ -401,6 +403,26 @@ unsigned int irq_get_next_irq(unsigned int offset) | |||
| 401 | return find_next_bit(allocated_irqs, nr_irqs, offset); | 403 | return find_next_bit(allocated_irqs, nr_irqs, offset); |
| 402 | } | 404 | } |
| 403 | 405 | ||
| 406 | struct irq_desc * | ||
| 407 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus) | ||
| 408 | { | ||
| 409 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 410 | |||
| 411 | if (desc) { | ||
| 412 | if (bus) | ||
| 413 | chip_bus_lock(desc); | ||
| 414 | raw_spin_lock_irqsave(&desc->lock, *flags); | ||
| 415 | } | ||
| 416 | return desc; | ||
| 417 | } | ||
| 418 | |||
| 419 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) | ||
| 420 | { | ||
| 421 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 422 | if (bus) | ||
| 423 | chip_bus_sync_unlock(desc); | ||
| 424 | } | ||
| 425 | |||
| 404 | /** | 426 | /** |
| 405 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq | 427 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq |
| 406 | * @irq: irq number to initialize | 428 | * @irq: irq number to initialize |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 2782bacdf494..acd599a43bfb 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -17,6 +17,17 @@ | |||
| 17 | 17 | ||
| 18 | #include "internals.h" | 18 | #include "internals.h" |
| 19 | 19 | ||
| 20 | #ifdef CONFIG_IRQ_FORCED_THREADING | ||
| 21 | __read_mostly bool force_irqthreads; | ||
| 22 | |||
| 23 | static int __init setup_forced_irqthreads(char *arg) | ||
| 24 | { | ||
| 25 | force_irqthreads = true; | ||
| 26 | return 0; | ||
| 27 | } | ||
| 28 | early_param("threadirqs", setup_forced_irqthreads); | ||
| 29 | #endif | ||
| 30 | |||
| 20 | /** | 31 | /** |
| 21 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 32 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
| 22 | * @irq: interrupt number to wait for | 33 | * @irq: interrupt number to wait for |
| @@ -30,7 +41,7 @@ | |||
| 30 | void synchronize_irq(unsigned int irq) | 41 | void synchronize_irq(unsigned int irq) |
| 31 | { | 42 | { |
| 32 | struct irq_desc *desc = irq_to_desc(irq); | 43 | struct irq_desc *desc = irq_to_desc(irq); |
| 33 | unsigned int status; | 44 | unsigned int state; |
| 34 | 45 | ||
| 35 | if (!desc) | 46 | if (!desc) |
| 36 | return; | 47 | return; |
| @@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq) | |||
| 42 | * Wait until we're out of the critical section. This might | 53 | * Wait until we're out of the critical section. This might |
| 43 | * give the wrong answer due to the lack of memory barriers. | 54 | * give the wrong answer due to the lack of memory barriers. |
| 44 | */ | 55 | */ |
| 45 | while (desc->status & IRQ_INPROGRESS) | 56 | while (desc->istate & IRQS_INPROGRESS) |
| 46 | cpu_relax(); | 57 | cpu_relax(); |
| 47 | 58 | ||
| 48 | /* Ok, that indicated we're done: double-check carefully. */ | 59 | /* Ok, that indicated we're done: double-check carefully. */ |
| 49 | raw_spin_lock_irqsave(&desc->lock, flags); | 60 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 50 | status = desc->status; | 61 | state = desc->istate; |
| 51 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 62 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 52 | 63 | ||
| 53 | /* Oops, that failed? */ | 64 | /* Oops, that failed? */ |
| 54 | } while (status & IRQ_INPROGRESS); | 65 | } while (state & IRQS_INPROGRESS); |
| 55 | 66 | ||
| 56 | /* | 67 | /* |
| 57 | * We made sure that no hardirq handler is running. Now verify | 68 | * We made sure that no hardirq handler is running. Now verify |
| @@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq) | |||
| 73 | { | 84 | { |
| 74 | struct irq_desc *desc = irq_to_desc(irq); | 85 | struct irq_desc *desc = irq_to_desc(irq); |
| 75 | 86 | ||
| 76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || | 87 | if (!desc || !irqd_can_balance(&desc->irq_data) || |
| 77 | !desc->irq_data.chip->irq_set_affinity) | 88 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) |
| 78 | return 0; | 89 | return 0; |
| 79 | 90 | ||
| 80 | return 1; | 91 | return 1; |
| @@ -100,67 +111,169 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
| 100 | } | 111 | } |
| 101 | } | 112 | } |
| 102 | 113 | ||
| 114 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
| 115 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) | ||
| 116 | { | ||
| 117 | return irq_settings_can_move_pcntxt(desc); | ||
| 118 | } | ||
| 119 | static inline bool irq_move_pending(struct irq_desc *desc) | ||
| 120 | { | ||
| 121 | return irqd_is_setaffinity_pending(&desc->irq_data); | ||
| 122 | } | ||
| 123 | static inline void | ||
| 124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | ||
| 125 | { | ||
| 126 | cpumask_copy(desc->pending_mask, mask); | ||
| 127 | } | ||
| 128 | static inline void | ||
| 129 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | ||
| 130 | { | ||
| 131 | cpumask_copy(mask, desc->pending_mask); | ||
| 132 | } | ||
| 133 | #else | ||
| 134 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; } | ||
| 135 | static inline bool irq_move_pending(struct irq_desc *desc) { return false; } | ||
| 136 | static inline void | ||
| 137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | ||
| 138 | static inline void | ||
| 139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | ||
| 140 | #endif | ||
| 141 | |||
| 103 | /** | 142 | /** |
| 104 | * irq_set_affinity - Set the irq affinity of a given irq | 143 | * irq_set_affinity - Set the irq affinity of a given irq |
| 105 | * @irq: Interrupt to set affinity | 144 | * @irq: Interrupt to set affinity |
| 106 | * @cpumask: cpumask | 145 | * @cpumask: cpumask |
| 107 | * | 146 | * |
| 108 | */ | 147 | */ |
| 109 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | 148 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) |
| 110 | { | 149 | { |
| 111 | struct irq_desc *desc = irq_to_desc(irq); | 150 | struct irq_desc *desc = irq_to_desc(irq); |
| 112 | struct irq_chip *chip = desc->irq_data.chip; | 151 | struct irq_chip *chip = desc->irq_data.chip; |
| 113 | unsigned long flags; | 152 | unsigned long flags; |
| 153 | int ret = 0; | ||
| 114 | 154 | ||
| 115 | if (!chip->irq_set_affinity) | 155 | if (!chip->irq_set_affinity) |
| 116 | return -EINVAL; | 156 | return -EINVAL; |
| 117 | 157 | ||
| 118 | raw_spin_lock_irqsave(&desc->lock, flags); | 158 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 119 | 159 | ||
| 120 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 160 | if (irq_can_move_pcntxt(desc)) { |
| 121 | if (desc->status & IRQ_MOVE_PCNTXT) { | 161 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); |
| 122 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { | 162 | switch (ret) { |
| 123 | cpumask_copy(desc->irq_data.affinity, cpumask); | 163 | case IRQ_SET_MASK_OK: |
| 164 | cpumask_copy(desc->irq_data.affinity, mask); | ||
| 165 | case IRQ_SET_MASK_OK_NOCOPY: | ||
| 124 | irq_set_thread_affinity(desc); | 166 | irq_set_thread_affinity(desc); |
| 167 | ret = 0; | ||
| 125 | } | 168 | } |
| 169 | } else { | ||
| 170 | irqd_set_move_pending(&desc->irq_data); | ||
| 171 | irq_copy_pending(desc, mask); | ||
| 126 | } | 172 | } |
| 127 | else { | 173 | |
| 128 | desc->status |= IRQ_MOVE_PENDING; | 174 | if (desc->affinity_notify) { |
| 129 | cpumask_copy(desc->pending_mask, cpumask); | 175 | kref_get(&desc->affinity_notify->kref); |
| 130 | } | 176 | schedule_work(&desc->affinity_notify->work); |
| 131 | #else | ||
| 132 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { | ||
| 133 | cpumask_copy(desc->irq_data.affinity, cpumask); | ||
| 134 | irq_set_thread_affinity(desc); | ||
| 135 | } | 177 | } |
| 136 | #endif | 178 | irq_compat_set_affinity(desc); |
| 137 | desc->status |= IRQ_AFFINITY_SET; | 179 | irqd_set(&desc->irq_data, IRQD_AFFINITY_SET); |
| 138 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 180 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 139 | return 0; | 181 | return ret; |
| 140 | } | 182 | } |
| 141 | 183 | ||
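The rewritten function returns the chip's verdict instead of an unconditional 0, and defers the move when the line cannot be retargeted from process context. A caller-side sketch, assuming a hypothetical wrapper:

	static int pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
	{
		if (!irq_can_set_affinity(irq))
			return -EINVAL;
		/*
		 * Applied immediately when irq_can_move_pcntxt(), otherwise
		 * recorded in pending_mask and performed from the next hard
		 * interrupt on the old target CPU.
		 */
		return irq_set_affinity(irq, cpumask_of(cpu));
	}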
| 142 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | 184 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
| 143 | { | 185 | { |
| 186 | unsigned long flags; | ||
| 187 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
| 188 | |||
| 189 | if (!desc) | ||
| 190 | return -EINVAL; | ||
| 191 | desc->affinity_hint = m; | ||
| 192 | irq_put_desc_unlock(desc, flags); | ||
| 193 | return 0; | ||
| 194 | } | ||
| 195 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | ||
| 196 | |||
| 197 | static void irq_affinity_notify(struct work_struct *work) | ||
| 198 | { | ||
| 199 | struct irq_affinity_notify *notify = | ||
| 200 | container_of(work, struct irq_affinity_notify, work); | ||
| 201 | struct irq_desc *desc = irq_to_desc(notify->irq); | ||
| 202 | cpumask_var_t cpumask; | ||
| 203 | unsigned long flags; | ||
| 204 | |||
| 205 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) | ||
| 206 | goto out; | ||
| 207 | |||
| 208 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 209 | if (irq_move_pending(desc)) | ||
| 210 | irq_get_pending(cpumask, desc); | ||
| 211 | else | ||
| 212 | cpumask_copy(cpumask, desc->irq_data.affinity); | ||
| 213 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 214 | |||
| 215 | notify->notify(notify, cpumask); | ||
| 216 | |||
| 217 | free_cpumask_var(cpumask); | ||
| 218 | out: | ||
| 219 | kref_put(¬ify->kref, notify->release); | ||
| 220 | } | ||
| 221 | |||
| 222 | /** | ||
| 223 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | ||
| 224 | * @irq: Interrupt for which to enable/disable notification | ||
| 225 | * @notify: Context for notification, or %NULL to disable | ||
| 226 | * notification. Function pointers must be initialised; | ||
| 227 | * the other fields will be initialised by this function. | ||
| 228 | * | ||
| 229 | * Must be called in process context. Notification may only be enabled | ||
| 230 | * after the IRQ is allocated and must be disabled before the IRQ is | ||
| 231 | * freed using free_irq(). | ||
| 232 | */ | ||
| 233 | int | ||
| 234 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | ||
| 235 | { | ||
| 144 | struct irq_desc *desc = irq_to_desc(irq); | 236 | struct irq_desc *desc = irq_to_desc(irq); |
| 237 | struct irq_affinity_notify *old_notify; | ||
| 145 | unsigned long flags; | 238 | unsigned long flags; |
| 146 | 239 | ||
| 240 | /* The release function is promised to run in process context */ | ||
| 241 | might_sleep(); | ||
| 242 | |||
| 147 | if (!desc) | 243 | if (!desc) |
| 148 | return -EINVAL; | 244 | return -EINVAL; |
| 149 | 245 | ||
| 246 | /* Complete initialisation of *notify */ | ||
| 247 | if (notify) { | ||
| 248 | notify->irq = irq; | ||
| 249 | kref_init(¬ify->kref); | ||
| 250 | INIT_WORK(¬ify->work, irq_affinity_notify); | ||
| 251 | } | ||
| 252 | |||
| 150 | raw_spin_lock_irqsave(&desc->lock, flags); | 253 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 151 | desc->affinity_hint = m; | 254 | old_notify = desc->affinity_notify; |
| 255 | desc->affinity_notify = notify; | ||
| 152 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 256 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 153 | 257 | ||
| 258 | if (old_notify) | ||
| 259 | kref_put(&old_notify->kref, old_notify->release); | ||
| 260 | |||
| 154 | return 0; | 261 | return 0; |
| 155 | } | 262 | } |
| 156 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | 263 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); |
| 157 | 264 | ||
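Driver-side registration sketch for the new notifier; the my_* names are hypothetical and the callback prototypes are assumed to match the struct irq_affinity_notify declaration in linux/interrupt.h. The release callback runs from the workqueue via kref_put(), which is why the function above insists on process context:

	static void my_notify(struct irq_affinity_notify *notify,
			      const cpumask_t *mask)
	{
		/* re-balance per-cpu driver resources to the new mask */
	}

	static void my_release(struct kref *ref)
	{
		struct irq_affinity_notify *n =
			container_of(ref, struct irq_affinity_notify, kref);
		/* nothing dynamically allocated in this sketch */
		(void)n;
	}

	static struct irq_affinity_notify my_affinity_notify = {
		.notify  = my_notify,
		.release = my_release,
	};

	static int my_open(unsigned int irq)
	{
		/* after request_irq(); pass (irq, NULL) before free_irq() */
		return irq_set_affinity_notifier(irq, &my_affinity_notify);
	}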
| 158 | #ifndef CONFIG_AUTO_IRQ_AFFINITY | 265 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
| 159 | /* | 266 | /* |
| 160 | * Generic version of the affinity autoselector. | 267 | * Generic version of the affinity autoselector. |
| 161 | */ | 268 | */ |
| 162 | static int setup_affinity(unsigned int irq, struct irq_desc *desc) | 269 | static int |
| 270 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | ||
| 163 | { | 271 | { |
| 272 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 273 | struct cpumask *set = irq_default_affinity; | ||
| 274 | int ret; | ||
| 275 | |||
| 276 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | ||
| 164 | if (!irq_can_set_affinity(irq)) | 277 | if (!irq_can_set_affinity(irq)) |
| 165 | return 0; | 278 | return 0; |
| 166 | 279 | ||
| @@ -168,22 +281,29 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) | |||
| 168 | * Preserve an userspace affinity setup, but make sure that | 281 | * Preserve an userspace affinity setup, but make sure that |
| 169 | * one of the targets is online. | 282 | * one of the targets is online. |
| 170 | */ | 283 | */ |
| 171 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 284 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
| 172 | if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) | 285 | if (cpumask_intersects(desc->irq_data.affinity, |
| 173 | < nr_cpu_ids) | 286 | cpu_online_mask)) |
| 174 | goto set_affinity; | 287 | set = desc->irq_data.affinity; |
| 175 | else | 288 | else { |
| 176 | desc->status &= ~IRQ_AFFINITY_SET; | 289 | irq_compat_clr_affinity(desc); |
| 290 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); | ||
| 291 | } | ||
| 177 | } | 292 | } |
| 178 | 293 | ||
| 179 | cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); | 294 | cpumask_and(mask, cpu_online_mask, set); |
| 180 | set_affinity: | 295 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); |
| 181 | desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false); | 296 | switch (ret) { |
| 182 | 297 | case IRQ_SET_MASK_OK: | |
| 298 | cpumask_copy(desc->irq_data.affinity, mask); | ||
| 299 | case IRQ_SET_MASK_OK_NOCOPY: | ||
| 300 | irq_set_thread_affinity(desc); | ||
| 301 | } | ||
| 183 | return 0; | 302 | return 0; |
| 184 | } | 303 | } |
| 185 | #else | 304 | #else |
| 186 | static inline int setup_affinity(unsigned int irq, struct irq_desc *d) | 305 | static inline int |
| 306 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) | ||
| 187 | { | 307 | { |
| 188 | return irq_select_affinity(irq); | 308 | return irq_select_affinity(irq); |
| 189 | } | 309 | } |
| @@ -192,23 +312,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d) | |||
| 192 | /* | 312 | /* |
| 193 | * Called when affinity is set via /proc/irq | 313 | * Called when affinity is set via /proc/irq |
| 194 | */ | 314 | */ |
| 195 | int irq_select_affinity_usr(unsigned int irq) | 315 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) |
| 196 | { | 316 | { |
| 197 | struct irq_desc *desc = irq_to_desc(irq); | 317 | struct irq_desc *desc = irq_to_desc(irq); |
| 198 | unsigned long flags; | 318 | unsigned long flags; |
| 199 | int ret; | 319 | int ret; |
| 200 | 320 | ||
| 201 | raw_spin_lock_irqsave(&desc->lock, flags); | 321 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 202 | ret = setup_affinity(irq, desc); | 322 | ret = setup_affinity(irq, desc, mask); |
| 203 | if (!ret) | ||
| 204 | irq_set_thread_affinity(desc); | ||
| 205 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 323 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 206 | |||
| 207 | return ret; | 324 | return ret; |
| 208 | } | 325 | } |
| 209 | 326 | ||
| 210 | #else | 327 | #else |
| 211 | static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) | 328 | static inline int |
| 329 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | ||
| 212 | { | 330 | { |
| 213 | return 0; | 331 | return 0; |
| 214 | } | 332 | } |
| @@ -219,13 +337,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
| 219 | if (suspend) { | 337 | if (suspend) { |
| 220 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | 338 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) |
| 221 | return; | 339 | return; |
| 222 | desc->status |= IRQ_SUSPENDED; | 340 | desc->istate |= IRQS_SUSPENDED; |
| 223 | } | 341 | } |
| 224 | 342 | ||
| 225 | if (!desc->depth++) { | 343 | if (!desc->depth++) |
| 226 | desc->status |= IRQ_DISABLED; | 344 | irq_disable(desc); |
| 227 | desc->irq_data.chip->irq_disable(&desc->irq_data); | 345 | } |
| 228 | } | 346 | |
| 347 | static int __disable_irq_nosync(unsigned int irq) | ||
| 348 | { | ||
| 349 | unsigned long flags; | ||
| 350 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
| 351 | |||
| 352 | if (!desc) | ||
| 353 | return -EINVAL; | ||
| 354 | __disable_irq(desc, irq, false); | ||
| 355 | irq_put_desc_busunlock(desc, flags); | ||
| 356 | return 0; | ||
| 229 | } | 357 | } |
| 230 | 358 | ||
| 231 | /** | 359 | /** |
| @@ -241,17 +369,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
| 241 | */ | 369 | */ |
| 242 | void disable_irq_nosync(unsigned int irq) | 370 | void disable_irq_nosync(unsigned int irq) |
| 243 | { | 371 | { |
| 244 | struct irq_desc *desc = irq_to_desc(irq); | 372 | __disable_irq_nosync(irq); |
| 245 | unsigned long flags; | ||
| 246 | |||
| 247 | if (!desc) | ||
| 248 | return; | ||
| 249 | |||
| 250 | chip_bus_lock(desc); | ||
| 251 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 252 | __disable_irq(desc, irq, false); | ||
| 253 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 254 | chip_bus_sync_unlock(desc); | ||
| 255 | } | 373 | } |
| 256 | EXPORT_SYMBOL(disable_irq_nosync); | 374 | EXPORT_SYMBOL(disable_irq_nosync); |
| 257 | 375 | ||
| @@ -269,13 +387,7 @@ EXPORT_SYMBOL(disable_irq_nosync); | |||
| 269 | */ | 387 | */ |
| 270 | void disable_irq(unsigned int irq) | 388 | void disable_irq(unsigned int irq) |
| 271 | { | 389 | { |
| 272 | struct irq_desc *desc = irq_to_desc(irq); | 390 | if (!__disable_irq_nosync(irq)) |
| 273 | |||
| 274 | if (!desc) | ||
| 275 | return; | ||
| 276 | |||
| 277 | disable_irq_nosync(irq); | ||
| 278 | if (desc->action) | ||
| 279 | synchronize_irq(irq); | 391 | synchronize_irq(irq); |
| 280 | } | 392 | } |
| 281 | EXPORT_SYMBOL(disable_irq); | 393 | EXPORT_SYMBOL(disable_irq); |
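Both entry points now funnel through __disable_irq_nosync(), and disable_irq() only waits when the descriptor lookup succeeded. The typical driver pairing, with a hypothetical wrapper:

	static void quiesce_and_reprogram(unsigned int irq)
	{
		disable_irq(irq);	/* waits for any running handler */
		/* line is off and no handler runs: safe to touch hardware */
		enable_irq(irq);	/* must balance the disable (desc->depth) */
	}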
| @@ -283,7 +395,7 @@ EXPORT_SYMBOL(disable_irq); | |||
| 283 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 395 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
| 284 | { | 396 | { |
| 285 | if (resume) { | 397 | if (resume) { |
| 286 | if (!(desc->status & IRQ_SUSPENDED)) { | 398 | if (!(desc->istate & IRQS_SUSPENDED)) { |
| 287 | if (!desc->action) | 399 | if (!desc->action) |
| 288 | return; | 400 | return; |
| 289 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | 401 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) |
| @@ -291,7 +403,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
| 291 | /* Pretend that it got disabled ! */ | 403 | /* Pretend that it got disabled ! */ |
| 292 | desc->depth++; | 404 | desc->depth++; |
| 293 | } | 405 | } |
| 294 | desc->status &= ~IRQ_SUSPENDED; | 406 | desc->istate &= ~IRQS_SUSPENDED; |
| 295 | } | 407 | } |
| 296 | 408 | ||
| 297 | switch (desc->depth) { | 409 | switch (desc->depth) { |
| @@ -300,12 +412,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
| 300 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 412 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
| 301 | break; | 413 | break; |
| 302 | case 1: { | 414 | case 1: { |
| 303 | unsigned int status = desc->status & ~IRQ_DISABLED; | 415 | if (desc->istate & IRQS_SUSPENDED) |
| 304 | |||
| 305 | if (desc->status & IRQ_SUSPENDED) | ||
| 306 | goto err_out; | 416 | goto err_out; |
| 307 | /* Prevent probing on this irq: */ | 417 | /* Prevent probing on this irq: */ |
| 308 | desc->status = status | IRQ_NOPROBE; | 418 | irq_settings_set_noprobe(desc); |
| 419 | irq_enable(desc); | ||
| 309 | check_irq_resend(desc, irq); | 420 | check_irq_resend(desc, irq); |
| 310 | /* fall-through */ | 421 | /* fall-through */ |
| 311 | } | 422 | } |
| @@ -327,21 +438,18 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
| 327 | */ | 438 | */ |
| 328 | void enable_irq(unsigned int irq) | 439 | void enable_irq(unsigned int irq) |
| 329 | { | 440 | { |
| 330 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 331 | unsigned long flags; | 441 | unsigned long flags; |
| 442 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
| 332 | 443 | ||
| 333 | if (!desc) | 444 | if (!desc) |
| 334 | return; | 445 | return; |
| 446 | if (WARN(!desc->irq_data.chip, | ||
| 447 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | ||
| 448 | goto out; | ||
| 335 | 449 | ||
| 336 | if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable, | ||
| 337 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | ||
| 338 | return; | ||
| 339 | |||
| 340 | chip_bus_lock(desc); | ||
| 341 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 342 | __enable_irq(desc, irq, false); | 450 | __enable_irq(desc, irq, false); |
| 343 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 451 | out: |
| 344 | chip_bus_sync_unlock(desc); | 452 | irq_put_desc_busunlock(desc, flags); |
| 345 | } | 453 | } |
| 346 | EXPORT_SYMBOL(enable_irq); | 454 | EXPORT_SYMBOL(enable_irq); |
| 347 | 455 | ||
| @@ -357,7 +465,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
| 357 | } | 465 | } |
| 358 | 466 | ||
| 359 | /** | 467 | /** |
| 360 | * set_irq_wake - control irq power management wakeup | 468 | * irq_set_irq_wake - control irq power management wakeup |
| 361 | * @irq: interrupt to control | 469 | * @irq: interrupt to control |
| 362 | * @on: enable/disable power management wakeup | 470 | * @on: enable/disable power management wakeup |
| 363 | * | 471 | * |
| @@ -368,23 +476,22 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
| 368 | * Wakeup mode lets this IRQ wake the system from sleep | 476 | * Wakeup mode lets this IRQ wake the system from sleep |
| 369 | * states like "suspend to RAM". | 477 | * states like "suspend to RAM". |
| 370 | */ | 478 | */ |
| 371 | int set_irq_wake(unsigned int irq, unsigned int on) | 479 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
| 372 | { | 480 | { |
| 373 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 374 | unsigned long flags; | 481 | unsigned long flags; |
| 482 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
| 375 | int ret = 0; | 483 | int ret = 0; |
| 376 | 484 | ||
| 377 | /* wakeup-capable irqs can be shared between drivers that | 485 | /* wakeup-capable irqs can be shared between drivers that |
| 378 | * don't need to have the same sleep mode behaviors. | 486 | * don't need to have the same sleep mode behaviors. |
| 379 | */ | 487 | */ |
| 380 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 381 | if (on) { | 488 | if (on) { |
| 382 | if (desc->wake_depth++ == 0) { | 489 | if (desc->wake_depth++ == 0) { |
| 383 | ret = set_irq_wake_real(irq, on); | 490 | ret = set_irq_wake_real(irq, on); |
| 384 | if (ret) | 491 | if (ret) |
| 385 | desc->wake_depth = 0; | 492 | desc->wake_depth = 0; |
| 386 | else | 493 | else |
| 387 | desc->status |= IRQ_WAKEUP; | 494 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); |
| 388 | } | 495 | } |
| 389 | } else { | 496 | } else { |
| 390 | if (desc->wake_depth == 0) { | 497 | if (desc->wake_depth == 0) { |
| @@ -394,14 +501,13 @@ int set_irq_wake(unsigned int irq, unsigned int on) | |||
| 394 | if (ret) | 501 | if (ret) |
| 395 | desc->wake_depth = 1; | 502 | desc->wake_depth = 1; |
| 396 | else | 503 | else |
| 397 | desc->status &= ~IRQ_WAKEUP; | 504 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
| 398 | } | 505 | } |
| 399 | } | 506 | } |
| 400 | 507 | irq_put_desc_busunlock(desc, flags); | |
| 401 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 402 | return ret; | 508 | return ret; |
| 403 | } | 509 | } |
| 404 | EXPORT_SYMBOL(set_irq_wake); | 510 | EXPORT_SYMBOL(irq_set_irq_wake); |
| 405 | 511 | ||
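Usage sketch of the renamed interface in a driver's suspend/resume path; struct my_ctx and the callbacks are hypothetical. wake_depth keeps nested enables and disables balanced:

	struct my_ctx { unsigned int irq; };	/* hypothetical driver context */

	static int my_suspend(struct device *dev)
	{
		struct my_ctx *ctx = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			irq_set_irq_wake(ctx->irq, 1);
		return 0;
	}

	static int my_resume(struct device *dev)
	{
		struct my_ctx *ctx = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			irq_set_irq_wake(ctx->irq, 0);	/* balances the enable */
		return 0;
	}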
| 406 | /* | 512 | /* |
| 407 | * Internal function that tells the architecture code whether a | 513 | * Internal function that tells the architecture code whether a |
| @@ -410,43 +516,27 @@ EXPORT_SYMBOL(set_irq_wake); | |||
| 410 | */ | 516 | */ |
| 411 | int can_request_irq(unsigned int irq, unsigned long irqflags) | 517 | int can_request_irq(unsigned int irq, unsigned long irqflags) |
| 412 | { | 518 | { |
| 413 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 414 | struct irqaction *action; | ||
| 415 | unsigned long flags; | 519 | unsigned long flags; |
| 520 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
| 521 | int canrequest = 0; | ||
| 416 | 522 | ||
| 417 | if (!desc) | 523 | if (!desc) |
| 418 | return 0; | 524 | return 0; |
| 419 | 525 | ||
| 420 | if (desc->status & IRQ_NOREQUEST) | 526 | if (irq_settings_can_request(desc)) { |
| 421 | return 0; | 527 | if (desc->action) |
| 422 | 528 | if (irqflags & desc->action->flags & IRQF_SHARED) | |
| 423 | raw_spin_lock_irqsave(&desc->lock, flags); | 529 | canrequest = 1; |
| 424 | action = desc->action; | 530 | } |
| 425 | if (action) | 531 | irq_put_desc_unlock(desc, flags); |
| 426 | if (irqflags & action->flags & IRQF_SHARED) | 532 | return canrequest; |
| 427 | action = NULL; | ||
| 428 | |||
| 429 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 430 | |||
| 431 | return !action; | ||
| 432 | } | ||
| 433 | |||
| 434 | void compat_irq_chip_set_default_handler(struct irq_desc *desc) | ||
| 435 | { | ||
| 436 | /* | ||
| 437 | * If the architecture still has not overriden | ||
| 438 | * the flow handler then zap the default. This | ||
| 439 | * should catch incorrect flow-type setting. | ||
| 440 | */ | ||
| 441 | if (desc->handle_irq == &handle_bad_irq) | ||
| 442 | desc->handle_irq = NULL; | ||
| 443 | } | 533 | } |
| 444 | 534 | ||
| 445 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 535 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
| 446 | unsigned long flags) | 536 | unsigned long flags) |
| 447 | { | 537 | { |
| 448 | int ret; | ||
| 449 | struct irq_chip *chip = desc->irq_data.chip; | 538 | struct irq_chip *chip = desc->irq_data.chip; |
| 539 | int ret, unmask = 0; | ||
| 450 | 540 | ||
| 451 | if (!chip || !chip->irq_set_type) { | 541 | if (!chip || !chip->irq_set_type) { |
| 452 | /* | 542 | /* |
| @@ -458,23 +548,43 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 458 | return 0; | 548 | return 0; |
| 459 | } | 549 | } |
| 460 | 550 | ||
| 551 | flags &= IRQ_TYPE_SENSE_MASK; | ||
| 552 | |||
| 553 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | ||
| 554 | if (!(desc->istate & IRQS_MASKED)) | ||
| 555 | mask_irq(desc); | ||
| 556 | if (!(desc->istate & IRQS_DISABLED)) | ||
| 557 | unmask = 1; | ||
| 558 | } | ||
| 559 | |||
| 461 | /* caller masked out all except trigger mode flags */ | 560 | /* caller masked out all except trigger mode flags */ |
| 462 | ret = chip->irq_set_type(&desc->irq_data, flags); | 561 | ret = chip->irq_set_type(&desc->irq_data, flags); |
| 463 | 562 | ||
| 464 | if (ret) | 563 | switch (ret) { |
| 465 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", | 564 | case IRQ_SET_MASK_OK: |
| 466 | flags, irq, chip->irq_set_type); | 565 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
| 467 | else { | 566 | irqd_set(&desc->irq_data, flags); |
| 468 | if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 567 | |
| 469 | flags |= IRQ_LEVEL; | 568 | case IRQ_SET_MASK_OK_NOCOPY: |
| 470 | /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ | 569 | flags = irqd_get_trigger_type(&desc->irq_data); |
| 471 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); | 570 | irq_settings_set_trigger_mask(desc, flags); |
| 472 | desc->status |= flags; | 571 | irqd_clear(&desc->irq_data, IRQD_LEVEL); |
| 572 | irq_settings_clr_level(desc); | ||
| 573 | if (flags & IRQ_TYPE_LEVEL_MASK) { | ||
| 574 | irq_settings_set_level(desc); | ||
| 575 | irqd_set(&desc->irq_data, IRQD_LEVEL); | ||
| 576 | } | ||
| 473 | 577 | ||
| 474 | if (chip != desc->irq_data.chip) | 578 | if (chip != desc->irq_data.chip) |
| 475 | irq_chip_set_defaults(desc->irq_data.chip); | 579 | irq_chip_set_defaults(desc->irq_data.chip); |
| 580 | ret = 0; | ||
| 581 | break; | ||
| 582 | default: | ||
| 583 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", | ||
| 584 | flags, irq, chip->irq_set_type); | ||
| 476 | } | 585 | } |
| 477 | 586 | if (unmask) | |
| 587 | unmask_irq(desc); | ||
| 478 | return ret; | 588 | return ret; |
| 479 | } | 589 | } |
| 480 | 590 | ||
| @@ -518,8 +628,11 @@ static int irq_wait_for_interrupt(struct irqaction *action) | |||
| 518 | * handler finished. unmask if the interrupt has not been disabled and | 628 | * handler finished. unmask if the interrupt has not been disabled and |
| 519 | * is marked MASKED. | 629 | * is marked MASKED. |
| 520 | */ | 630 | */ |
| 521 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) | 631 | static void irq_finalize_oneshot(struct irq_desc *desc, |
| 632 | struct irqaction *action, bool force) | ||
| 522 | { | 633 | { |
| 634 | if (!(desc->istate & IRQS_ONESHOT)) | ||
| 635 | return; | ||
| 523 | again: | 636 | again: |
| 524 | chip_bus_lock(desc); | 637 | chip_bus_lock(desc); |
| 525 | raw_spin_lock_irq(&desc->lock); | 638 | raw_spin_lock_irq(&desc->lock); |
| @@ -531,26 +644,44 @@ again: | |||
| 531 | * The thread is faster done than the hard interrupt handler | 644 | * The thread is faster done than the hard interrupt handler |
| 532 | * on the other CPU. If we unmask the irq line then the | 645 | * on the other CPU. If we unmask the irq line then the |
| 533 | * interrupt can come in again and masks the line, leaves due | 646 | * interrupt can come in again and masks the line, leaves due |
| 534 | * to IRQ_INPROGRESS and the irq line is masked forever. | 647 | * to IRQS_INPROGRESS and the irq line is masked forever. |
| 648 | * | ||
| 649 | * This also serializes the state of shared oneshot handlers | ||
| 650 | * versus "desc->threads_onehsot |= action->thread_mask;" in | ||
| 651 | * irq_wake_thread(). See the comment there which explains the | ||
| 652 | * serialization. | ||
| 535 | */ | 653 | */ |
| 536 | if (unlikely(desc->status & IRQ_INPROGRESS)) { | 654 | if (unlikely(desc->istate & IRQS_INPROGRESS)) { |
| 537 | raw_spin_unlock_irq(&desc->lock); | 655 | raw_spin_unlock_irq(&desc->lock); |
| 538 | chip_bus_sync_unlock(desc); | 656 | chip_bus_sync_unlock(desc); |
| 539 | cpu_relax(); | 657 | cpu_relax(); |
| 540 | goto again; | 658 | goto again; |
| 541 | } | 659 | } |
| 542 | 660 | ||
| 543 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | 661 | /* |
| 544 | desc->status &= ~IRQ_MASKED; | 662 | * Now check again, whether the thread should run. Otherwise |
| 663 | * we would clear the threads_oneshot bit of this thread which | ||
| 664 | * was just set. | ||
| 665 | */ | ||
| 666 | if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | ||
| 667 | goto out_unlock; | ||
| 668 | |||
| 669 | desc->threads_oneshot &= ~action->thread_mask; | ||
| 670 | |||
| 671 | if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) && | ||
| 672 | (desc->istate & IRQS_MASKED)) { | ||
| 673 | irq_compat_clr_masked(desc); | ||
| 674 | desc->istate &= ~IRQS_MASKED; | ||
| 545 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 675 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
| 546 | } | 676 | } |
| 677 | out_unlock: | ||
| 547 | raw_spin_unlock_irq(&desc->lock); | 678 | raw_spin_unlock_irq(&desc->lock); |
| 548 | chip_bus_sync_unlock(desc); | 679 | chip_bus_sync_unlock(desc); |
| 549 | } | 680 | } |
| 550 | 681 | ||
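With threads_oneshot tracking each sharer by bit, ONESHOT no longer excludes SHARED; the __setup_irq() changes below instead require all sharers to agree on it. A request sketch with hypothetical handlers:

	static irqreturn_t my_check(int irq, void *dev_id)
	{
		/* hard irq context; the line stays masked until the thread ends */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t my_thread(int irq, void *dev_id)
	{
		/* may sleep; irq_finalize_oneshot() unmasks the line only
		 * once every sharer's threads_oneshot bit is clear */
		return IRQ_HANDLED;
	}

	static int my_attach(unsigned int irq, void *dev)
	{
		return request_threaded_irq(irq, my_check, my_thread,
					    IRQF_SHARED | IRQF_ONESHOT,
					    "mydev", dev);
	}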
| 551 | #ifdef CONFIG_SMP | 682 | #ifdef CONFIG_SMP |
| 552 | /* | 683 | /* |
| 553 | * Check whether we need to change the affinity of the interrupt thread. | 684 | * Check whether we need to chasnge the affinity of the interrupt thread. |
| 554 | */ | 685 | */ |
| 555 | static void | 686 | static void |
| 556 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 687 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
| @@ -582,6 +713,32 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | |||
| 582 | #endif | 713 | #endif |
| 583 | 714 | ||
| 584 | /* | 715 | /* |
| 716 | * Interrupts which are not explicitly requested as threaded | ||
| 717 | * interrupts rely on the implicit bh/preempt disable of the hard irq | ||
| 718 | * context. So we need to disable bh here to avoid deadlocks and other | ||
| 719 | * side effects. | ||
| 720 | */ | ||
| 721 | static void | ||
| 722 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
| 723 | { | ||
| 724 | local_bh_disable(); | ||
| 725 | action->thread_fn(action->irq, action->dev_id); | ||
| 726 | irq_finalize_oneshot(desc, action, false); | ||
| 727 | local_bh_enable(); | ||
| 728 | } | ||
| 729 | |||
| 730 | /* | ||
| 731 | * Interrupts explicitly requested as threaded interrupts want to be | ||
| 732 | * preemptible - many of them need to sleep and wait for slow busses to | ||
| 733 | * complete. | ||
| 734 | */ | ||
| 735 | static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
| 736 | { | ||
| 737 | action->thread_fn(action->irq, action->dev_id); | ||
| 738 | irq_finalize_oneshot(desc, action, false); | ||
| 739 | } | ||
| 740 | |||
| 741 | /* | ||
| 585 | * Interrupt handler thread | 742 | * Interrupt handler thread |
| 586 | */ | 743 | */ |
| 587 | static int irq_thread(void *data) | 744 | static int irq_thread(void *data) |
| @@ -591,7 +748,14 @@ static int irq_thread(void *data) | |||
| 591 | }; | 748 | }; |
| 592 | struct irqaction *action = data; | 749 | struct irqaction *action = data; |
| 593 | struct irq_desc *desc = irq_to_desc(action->irq); | 750 | struct irq_desc *desc = irq_to_desc(action->irq); |
| 594 | int wake, oneshot = desc->status & IRQ_ONESHOT; | 751 | void (*handler_fn)(struct irq_desc *desc, struct irqaction *action); |
| 752 | int wake; | ||
| 753 | |||
| 754 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, | ||
| 755 | &action->thread_flags)) | ||
| 756 | handler_fn = irq_forced_thread_fn; | ||
| 757 | else | ||
| 758 | handler_fn = irq_thread_fn; | ||
| 595 | 759 | ||
| 596 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 760 | sched_setscheduler(current, SCHED_FIFO, ¶m); |
| 597 | current->irqaction = action; | 761 | current->irqaction = action; |
| @@ -603,23 +767,20 @@ static int irq_thread(void *data) | |||
| 603 | atomic_inc(&desc->threads_active); | 767 | atomic_inc(&desc->threads_active); |
| 604 | 768 | ||
| 605 | raw_spin_lock_irq(&desc->lock); | 769 | raw_spin_lock_irq(&desc->lock); |
| 606 | if (unlikely(desc->status & IRQ_DISABLED)) { | 770 | if (unlikely(desc->istate & IRQS_DISABLED)) { |
| 607 | /* | 771 | /* |
| 608 | * CHECKME: We might need a dedicated | 772 | * CHECKME: We might need a dedicated |
| 609 | * IRQ_THREAD_PENDING flag here, which | 773 | * IRQ_THREAD_PENDING flag here, which |
| 610 | * retriggers the thread in check_irq_resend() | 774 | * retriggers the thread in check_irq_resend() |
| 611 | * but AFAICT IRQ_PENDING should be fine as it | 775 | * but AFAICT IRQS_PENDING should be fine as it |
| 612 | * retriggers the interrupt itself --- tglx | 776 | * retriggers the interrupt itself --- tglx |
| 613 | */ | 777 | */ |
| 614 | desc->status |= IRQ_PENDING; | 778 | irq_compat_set_pending(desc); |
| 779 | desc->istate |= IRQS_PENDING; | ||
| 615 | raw_spin_unlock_irq(&desc->lock); | 780 | raw_spin_unlock_irq(&desc->lock); |
| 616 | } else { | 781 | } else { |
| 617 | raw_spin_unlock_irq(&desc->lock); | 782 | raw_spin_unlock_irq(&desc->lock); |
| 618 | 783 | handler_fn(desc, action); | |
| 619 | action->thread_fn(action->irq, action->dev_id); | ||
| 620 | |||
| 621 | if (oneshot) | ||
| 622 | irq_finalize_oneshot(action->irq, desc); | ||
| 623 | } | 784 | } |
| 624 | 785 | ||
| 625 | wake = atomic_dec_and_test(&desc->threads_active); | 786 | wake = atomic_dec_and_test(&desc->threads_active); |
| @@ -628,6 +789,9 @@ static int irq_thread(void *data) | |||
| 628 | wake_up(&desc->wait_for_threads); | 789 | wake_up(&desc->wait_for_threads); |
| 629 | } | 790 | } |
| 630 | 791 | ||
| 792 | /* Prevent a stale desc->threads_oneshot */ | ||
| 793 | irq_finalize_oneshot(desc, action, true); | ||
| 794 | |||
| 631 | /* | 795 | /* |
| 632 | * Clear irqaction. Otherwise exit_irq_thread() would make | 796 | * Clear irqaction. Otherwise exit_irq_thread() would make |
| 633 | * fuzz about an active irq thread going into nirvana. | 797 | * fuzz about an active irq thread going into nirvana. |
| @@ -642,6 +806,7 @@ static int irq_thread(void *data) | |||
| 642 | void exit_irq_thread(void) | 806 | void exit_irq_thread(void) |
| 643 | { | 807 | { |
| 644 | struct task_struct *tsk = current; | 808 | struct task_struct *tsk = current; |
| 809 | struct irq_desc *desc; | ||
| 645 | 810 | ||
| 646 | if (!tsk->irqaction) | 811 | if (!tsk->irqaction) |
| 647 | return; | 812 | return; |
| @@ -650,6 +815,14 @@ void exit_irq_thread(void) | |||
| 650 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | 815 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
| 651 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); | 816 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); |
| 652 | 817 | ||
| 818 | desc = irq_to_desc(tsk->irqaction->irq); | ||
| 819 | |||
| 820 | /* | ||
| 821 | * Prevent a stale desc->threads_oneshot. Must be called | ||
| 822 | * before setting the IRQTF_DIED flag. | ||
| 823 | */ | ||
| 824 | irq_finalize_oneshot(desc, tsk->irqaction, true); | ||
| 825 | |||
| 653 | /* | 826 | /* |
| 654 | * Set the THREAD DIED flag to prevent further wakeups of the | 827 | * Set the THREAD DIED flag to prevent further wakeups of the |
| 655 | * soon to be gone threaded handler. | 828 | * soon to be gone threaded handler. |
| @@ -657,6 +830,22 @@ void exit_irq_thread(void) | |||
| 657 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); | 830 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); |
| 658 | } | 831 | } |
| 659 | 832 | ||
| 833 | static void irq_setup_forced_threading(struct irqaction *new) | ||
| 834 | { | ||
| 835 | if (!force_irqthreads) | ||
| 836 | return; | ||
| 837 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | ||
| 838 | return; | ||
| 839 | |||
| 840 | new->flags |= IRQF_ONESHOT; | ||
| 841 | |||
| 842 | if (!new->thread_fn) { | ||
| 843 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | ||
| 844 | new->thread_fn = new->handler; | ||
| 845 | new->handler = irq_default_primary_handler; | ||
| 846 | } | ||
| 847 | } | ||
| 848 | |||
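The net effect of irq_setup_forced_threading(): under threadirqs a plain request_irq() behaves as if the driver had asked for a threaded oneshot handler. Roughly, where my_handler is a hypothetical driver handler and irq_default_primary_handler simply returns IRQ_WAKE_THREAD:

	/* the driver writes: */
	request_irq(irq, my_handler, 0, "mydev", dev);

	/* with threadirqs the core rewrites the action as if it were: */
	request_threaded_irq(irq, irq_default_primary_handler, my_handler,
			     IRQF_ONESHOT, "mydev", dev);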
| 660 | /* | 849 | /* |
| 661 | * Internal function to register an irqaction - typically used to | 850 | * Internal function to register an irqaction - typically used to |
| 662 | * allocate special interrupts that are part of the architecture. | 851 | * allocate special interrupts that are part of the architecture. |
| @@ -666,9 +855,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 666 | { | 855 | { |
| 667 | struct irqaction *old, **old_ptr; | 856 | struct irqaction *old, **old_ptr; |
| 668 | const char *old_name = NULL; | 857 | const char *old_name = NULL; |
| 669 | unsigned long flags; | 858 | unsigned long flags, thread_mask = 0; |
| 670 | int nested, shared = 0; | 859 | int ret, nested, shared = 0; |
| 671 | int ret; | 860 | cpumask_var_t mask; |
| 672 | 861 | ||
| 673 | if (!desc) | 862 | if (!desc) |
| 674 | return -EINVAL; | 863 | return -EINVAL; |
| @@ -692,15 +881,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 692 | rand_initialize_irq(irq); | 881 | rand_initialize_irq(irq); |
| 693 | } | 882 | } |
| 694 | 883 | ||
| 695 | /* Oneshot interrupts are not allowed with shared */ | ||
| 696 | if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED)) | ||
| 697 | return -EINVAL; | ||
| 698 | |||
| 699 | /* | 884 | /* |
| 700 | * Check whether the interrupt nests into another interrupt | 885 | * Check whether the interrupt nests into another interrupt |
| 701 | * thread. | 886 | * thread. |
| 702 | */ | 887 | */ |
| 703 | nested = desc->status & IRQ_NESTED_THREAD; | 888 | nested = irq_settings_is_nested_thread(desc); |
| 704 | if (nested) { | 889 | if (nested) { |
| 705 | if (!new->thread_fn) | 890 | if (!new->thread_fn) |
| 706 | return -EINVAL; | 891 | return -EINVAL; |
| @@ -710,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 710 | * dummy function which warns when called. | 895 | * dummy function which warns when called. |
| 711 | */ | 896 | */ |
| 712 | new->handler = irq_nested_primary_handler; | 897 | new->handler = irq_nested_primary_handler; |
| 898 | } else { | ||
| 899 | irq_setup_forced_threading(new); | ||
| 713 | } | 900 | } |
| 714 | 901 | ||
| 715 | /* | 902 | /* |
| @@ -733,6 +920,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 733 | new->thread = t; | 920 | new->thread = t; |
| 734 | } | 921 | } |
| 735 | 922 | ||
| 923 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
| 924 | ret = -ENOMEM; | ||
| 925 | goto out_thread; | ||
| 926 | } | ||
| 927 | |||
| 736 | /* | 928 | /* |
| 737 | * The following block of code has to be executed atomically | 929 | * The following block of code has to be executed atomically |
| 738 | */ | 930 | */ |
| @@ -744,29 +936,40 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 744 | * Can't share interrupts unless both agree to and are | 936 | * Can't share interrupts unless both agree to and are |
| 745 | * the same type (level, edge, polarity). So both flag | 937 | * the same type (level, edge, polarity). So both flag |
| 746 | * fields must have IRQF_SHARED set and the bits which | 938 | * fields must have IRQF_SHARED set and the bits which |
| 747 | * set the trigger type must match. | 939 | * set the trigger type must match. Also all must |
| 940 | * agree on ONESHOT. | ||
| 748 | */ | 941 | */ |
| 749 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 942 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
| 750 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { | 943 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || |
| 944 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) { | ||
| 751 | old_name = old->name; | 945 | old_name = old->name; |
| 752 | goto mismatch; | 946 | goto mismatch; |
| 753 | } | 947 | } |
| 754 | 948 | ||
| 755 | #if defined(CONFIG_IRQ_PER_CPU) | ||
| 756 | /* All handlers must agree on per-cpuness */ | 949 | /* All handlers must agree on per-cpuness */ |
| 757 | if ((old->flags & IRQF_PERCPU) != | 950 | if ((old->flags & IRQF_PERCPU) != |
| 758 | (new->flags & IRQF_PERCPU)) | 951 | (new->flags & IRQF_PERCPU)) |
| 759 | goto mismatch; | 952 | goto mismatch; |
| 760 | #endif | ||
| 761 | 953 | ||
| 762 | /* add new interrupt at end of irq queue */ | 954 | /* add new interrupt at end of irq queue */ |
| 763 | do { | 955 | do { |
| 956 | thread_mask |= old->thread_mask; | ||
| 764 | old_ptr = &old->next; | 957 | old_ptr = &old->next; |
| 765 | old = *old_ptr; | 958 | old = *old_ptr; |
| 766 | } while (old); | 959 | } while (old); |
| 767 | shared = 1; | 960 | shared = 1; |
| 768 | } | 961 | } |
| 769 | 962 | ||
| 963 | /* | ||
| 964 | * Setup the thread mask for this irqaction. Unlikely to have | ||
| 965 | * 32 or 64 irqs sharing one line, but who knows. | ||
| 966 | */ | ||
| 967 | if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) { | ||
| 968 | ret = -EBUSY; | ||
| 969 | goto out_mask; | ||
| 970 | } | ||
| 971 | new->thread_mask = 1 << ffz(thread_mask); | ||
| 972 | |||
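Worked example of the bit allocation: with two sharers already holding bits 0 and 1, the OR accumulated in thread_mask is 0x3, so the new action gets the first free bit:

	unsigned long taken = 0x3;		/* bits of existing sharers */
	unsigned long mine = 1UL << ffz(taken);	/* ffz(0x3) == 2, so 0x4 */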
| 770 | if (!shared) { | 973 | if (!shared) { |
| 771 | irq_chip_set_defaults(desc->irq_data.chip); | 974 | irq_chip_set_defaults(desc->irq_data.chip); |
| 772 | 975 | ||
| @@ -778,42 +981,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 778 | new->flags & IRQF_TRIGGER_MASK); | 981 | new->flags & IRQF_TRIGGER_MASK); |
| 779 | 982 | ||
| 780 | if (ret) | 983 | if (ret) |
| 781 | goto out_thread; | 984 | goto out_mask; |
| 782 | } else | 985 | } |
| 783 | compat_irq_chip_set_default_handler(desc); | ||
| 784 | #if defined(CONFIG_IRQ_PER_CPU) | ||
| 785 | if (new->flags & IRQF_PERCPU) | ||
| 786 | desc->status |= IRQ_PER_CPU; | ||
| 787 | #endif | ||
| 788 | 986 | ||
| 789 | desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | | 987 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
| 790 | IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); | 988 | IRQS_INPROGRESS | IRQS_ONESHOT | \ |
| 989 | IRQS_WAITING); | ||
| 990 | |||
| 991 | if (new->flags & IRQF_PERCPU) { | ||
| 992 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | ||
| 993 | irq_settings_set_per_cpu(desc); | ||
| 994 | } | ||
| 791 | 995 | ||
| 792 | if (new->flags & IRQF_ONESHOT) | 996 | if (new->flags & IRQF_ONESHOT) |
| 793 | desc->status |= IRQ_ONESHOT; | 997 | desc->istate |= IRQS_ONESHOT; |
| 794 | 998 | ||
| 795 | if (!(desc->status & IRQ_NOAUTOEN)) { | 999 | if (irq_settings_can_autoenable(desc)) |
| 796 | desc->depth = 0; | 1000 | irq_startup(desc); |
| 797 | desc->status &= ~IRQ_DISABLED; | 1001 | else |
| 798 | desc->irq_data.chip->irq_startup(&desc->irq_data); | ||
| 799 | } else | ||
| 800 | /* Undo nested disables: */ | 1002 | /* Undo nested disables: */ |
| 801 | desc->depth = 1; | 1003 | desc->depth = 1; |
| 802 | 1004 | ||
| 803 | /* Exclude IRQ from balancing if requested */ | 1005 | /* Exclude IRQ from balancing if requested */ |
| 804 | if (new->flags & IRQF_NOBALANCING) | 1006 | if (new->flags & IRQF_NOBALANCING) { |
| 805 | desc->status |= IRQ_NO_BALANCING; | 1007 | irq_settings_set_no_balancing(desc); |
| 1008 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | ||
| 1009 | } | ||
| 806 | 1010 | ||
| 807 | /* Set default affinity mask once everything is setup */ | 1011 | /* Set default affinity mask once everything is setup */ |
| 808 | setup_affinity(irq, desc); | 1012 | setup_affinity(irq, desc, mask); |
| 809 | 1013 | ||
| 810 | } else if ((new->flags & IRQF_TRIGGER_MASK) | 1014 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
| 811 | && (new->flags & IRQF_TRIGGER_MASK) | 1015 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
| 812 | != (desc->status & IRQ_TYPE_SENSE_MASK)) { | 1016 | unsigned int omsk = irq_settings_get_trigger_mask(desc); |
| 813 | /* hope the handler works with the actual trigger mode... */ | 1017 | |
| 814 | pr_warning("IRQ %d uses trigger mode %d; requested %d\n", | 1018 | if (nmsk != omsk) |
| 815 | irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), | 1019 | /* hope the handler works with current trigger mode */ |
| 816 | (int)(new->flags & IRQF_TRIGGER_MASK)); | 1020 | pr_warning("IRQ %d uses trigger mode %u; requested %u\n", |
| 1021 | irq, nmsk, omsk); | ||
| 817 | } | 1022 | } |
| 818 | 1023 | ||
| 819 | new->irq = irq; | 1024 | new->irq = irq; |
| @@ -827,8 +1032,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 827 | * Check whether we disabled the irq via the spurious handler | 1032 | * Check whether we disabled the irq via the spurious handler |
| 828 | * before. Reenable it and give it another chance. | 1033 | * before. Reenable it and give it another chance. |
| 829 | */ | 1034 | */ |
| 830 | if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { | 1035 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
| 831 | desc->status &= ~IRQ_SPURIOUS_DISABLED; | 1036 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
| 832 | __enable_irq(desc, irq, false); | 1037 | __enable_irq(desc, irq, false); |
| 833 | } | 1038 | } |
| 834 | 1039 | ||
| @@ -858,6 +1063,9 @@ mismatch: | |||
| 858 | #endif | 1063 | #endif |
| 859 | ret = -EBUSY; | 1064 | ret = -EBUSY; |
| 860 | 1065 | ||
| 1066 | out_mask: | ||
| 1067 | free_cpumask_var(mask); | ||
| 1068 | |||
| 861 | out_thread: | 1069 | out_thread: |
| 862 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1070 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 863 | if (new->thread) { | 1071 | if (new->thread) { |
| @@ -880,9 +1088,14 @@ out_thread: | |||
| 880 | */ | 1088 | */ |
| 881 | int setup_irq(unsigned int irq, struct irqaction *act) | 1089 | int setup_irq(unsigned int irq, struct irqaction *act) |
| 882 | { | 1090 | { |
| 1091 | int retval; | ||
| 883 | struct irq_desc *desc = irq_to_desc(irq); | 1092 | struct irq_desc *desc = irq_to_desc(irq); |
| 884 | 1093 | ||
| 885 | return __setup_irq(irq, desc, act); | 1094 | chip_bus_lock(desc); |
| 1095 | retval = __setup_irq(irq, desc, act); | ||
| 1096 | chip_bus_sync_unlock(desc); | ||
| 1097 | |||
| 1098 | return retval; | ||
| 886 | } | 1099 | } |
| 887 | EXPORT_SYMBOL_GPL(setup_irq); | 1100 | EXPORT_SYMBOL_GPL(setup_irq); |
| 888 | 1101 | ||
| @@ -933,13 +1146,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 933 | #endif | 1146 | #endif |
| 934 | 1147 | ||
| 935 | /* If this was the last handler, shut down the IRQ line: */ | 1148 | /* If this was the last handler, shut down the IRQ line: */ |
| 936 | if (!desc->action) { | 1149 | if (!desc->action) |
| 937 | desc->status |= IRQ_DISABLED; | 1150 | irq_shutdown(desc); |
| 938 | if (desc->irq_data.chip->irq_shutdown) | ||
| 939 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | ||
| 940 | else | ||
| 941 | desc->irq_data.chip->irq_disable(&desc->irq_data); | ||
| 942 | } | ||
| 943 | 1151 | ||
| 944 | #ifdef CONFIG_SMP | 1152 | #ifdef CONFIG_SMP |
| 945 | /* make sure affinity_hint is cleaned up */ | 1153 | /* make sure affinity_hint is cleaned up */ |
| @@ -1013,6 +1221,11 @@ void free_irq(unsigned int irq, void *dev_id) | |||
| 1013 | if (!desc) | 1221 | if (!desc) |
| 1014 | return; | 1222 | return; |
| 1015 | 1223 | ||
| 1224 | #ifdef CONFIG_SMP | ||
| 1225 | if (WARN_ON(desc->affinity_notify)) | ||
| 1226 | desc->affinity_notify = NULL; | ||
| 1227 | #endif | ||
| 1228 | |||
| 1016 | chip_bus_lock(desc); | 1229 | chip_bus_lock(desc); |
| 1017 | kfree(__free_irq(irq, dev_id)); | 1230 | kfree(__free_irq(irq, dev_id)); |
| 1018 | chip_bus_sync_unlock(desc); | 1231 | chip_bus_sync_unlock(desc); |
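
The new WARN_ON() in free_irq() catches drivers that tear down an interrupt while an affinity notifier is still registered. Assuming the irq_set_affinity_notifier() interface from this series, the teardown order would presumably be:

        /* detach the notifier first; a NULL argument unregisters it */
        irq_set_affinity_notifier(irq, NULL);

        /* only then release the interrupt line */
        free_irq(irq, dev_id);
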
| @@ -1083,7 +1296,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
| 1083 | if (!desc) | 1296 | if (!desc) |
| 1084 | return -EINVAL; | 1297 | return -EINVAL; |
| 1085 | 1298 | ||
| 1086 | if (desc->status & IRQ_NOREQUEST) | 1299 | if (!irq_settings_can_request(desc)) |
| 1087 | return -EINVAL; | 1300 | return -EINVAL; |
| 1088 | 1301 | ||
| 1089 | if (!handler) { | 1302 | if (!handler) { |
| @@ -1158,7 +1371,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |||
| 1158 | if (!desc) | 1371 | if (!desc) |
| 1159 | return -EINVAL; | 1372 | return -EINVAL; |
| 1160 | 1373 | ||
| 1161 | if (desc->status & IRQ_NESTED_THREAD) { | 1374 | if (irq_settings_is_nested_thread(desc)) { |
| 1162 | ret = request_threaded_irq(irq, NULL, handler, | 1375 | ret = request_threaded_irq(irq, NULL, handler, |
| 1163 | flags, name, dev_id); | 1376 | flags, name, dev_id); |
| 1164 | return !ret ? IRQC_IS_NESTED : ret; | 1377 | return !ret ? IRQC_IS_NESTED : ret; |
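
request_threaded_irq() now gates on irq_settings_can_request(), and request_any_context_irq() transparently falls back to a nested thread where the descriptor is marked accordingly. For context, the split primary/threaded registration these paths serve looks roughly like the sketch below; struct foo and its helpers are invented.

        #include <linux/interrupt.h>

        struct foo;                                     /* hypothetical device */
        static bool foo_irq_pending(struct foo *foo);   /* hypothetical helpers */
        static void foo_mask_device(struct foo *foo);
        static void foo_unmask_device(struct foo *foo);
        static void foo_process_events(struct foo *foo);

        static irqreturn_t foo_primary(int irq, void *dev_id)
        {
                struct foo *foo = dev_id;

                if (!foo_irq_pending(foo))
                        return IRQ_NONE;        /* not ours on a shared line */

                foo_mask_device(foo);           /* quiesce until the thread runs */
                return IRQ_WAKE_THREAD;
        }

        static irqreturn_t foo_thread(int irq, void *dev_id)
        {
                struct foo *foo = dev_id;

                foo_process_events(foo);        /* may sleep here */
                foo_unmask_device(foo);
                return IRQ_HANDLED;
        }

        /* registration, e.g. from probe():
         *      err = request_threaded_irq(irq, foo_primary, foo_thread,
         *                                 IRQF_SHARED, "foo", foo);
         */
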
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 441fd629ff04..ec4806d4778b 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
| @@ -4,23 +4,23 @@ | |||
| 4 | 4 | ||
| 5 | #include "internals.h" | 5 | #include "internals.h" |
| 6 | 6 | ||
| 7 | void move_masked_irq(int irq) | 7 | void irq_move_masked_irq(struct irq_data *idata) |
| 8 | { | 8 | { |
| 9 | struct irq_desc *desc = irq_to_desc(irq); | 9 | struct irq_desc *desc = irq_data_to_desc(idata); |
| 10 | struct irq_chip *chip = desc->irq_data.chip; | 10 | struct irq_chip *chip = idata->chip; |
| 11 | 11 | ||
| 12 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 12 | if (likely(!irqd_is_setaffinity_pending(&desc->irq_data))) |
| 13 | return; | 13 | return; |
| 14 | 14 | ||
| 15 | /* | 15 | /* |
| 16 | * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. | 16 | * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. |
| 17 | */ | 17 | */ |
| 18 | if (CHECK_IRQ_PER_CPU(desc->status)) { | 18 | if (!irqd_can_balance(&desc->irq_data)) { |
| 19 | WARN_ON(1); | 19 | WARN_ON(1); |
| 20 | return; | 20 | return; |
| 21 | } | 21 | } |
| 22 | 22 | ||
| 23 | desc->status &= ~IRQ_MOVE_PENDING; | 23 | irqd_clr_move_pending(&desc->irq_data); |
| 24 | 24 | ||
| 25 | if (unlikely(cpumask_empty(desc->pending_mask))) | 25 | if (unlikely(cpumask_empty(desc->pending_mask))) |
| 26 | return; | 26 | return; |
| @@ -53,15 +53,20 @@ void move_masked_irq(int irq) | |||
| 53 | cpumask_clear(desc->pending_mask); | 53 | cpumask_clear(desc->pending_mask); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | void move_native_irq(int irq) | 56 | void move_masked_irq(int irq) |
| 57 | { | ||
| 58 | irq_move_masked_irq(irq_get_irq_data(irq)); | ||
| 59 | } | ||
| 60 | |||
| 61 | void irq_move_irq(struct irq_data *idata) | ||
| 57 | { | 62 | { |
| 58 | struct irq_desc *desc = irq_to_desc(irq); | 63 | struct irq_desc *desc = irq_data_to_desc(idata); |
| 59 | bool masked; | 64 | bool masked; |
| 60 | 65 | ||
| 61 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 66 | if (likely(!irqd_is_setaffinity_pending(idata))) |
| 62 | return; | 67 | return; |
| 63 | 68 | ||
| 64 | if (unlikely(desc->status & IRQ_DISABLED)) | 69 | if (unlikely(desc->istate & IRQS_DISABLED)) |
| 65 | return; | 70 | return; |
| 66 | 71 | ||
| 67 | /* | 72 | /* |
| @@ -69,10 +74,15 @@ void move_native_irq(int irq) | |||
| 69 | * threaded interrupt with ONESHOT set, we can end up with an | 74 | * threaded interrupt with ONESHOT set, we can end up with an |
| 70 | * interrupt storm. | 75 | * interrupt storm. |
| 71 | */ | 76 | */ |
| 72 | masked = desc->status & IRQ_MASKED; | 77 | masked = desc->istate & IRQS_MASKED; |
| 73 | if (!masked) | 78 | if (!masked) |
| 74 | desc->irq_data.chip->irq_mask(&desc->irq_data); | 79 | idata->chip->irq_mask(idata); |
| 75 | move_masked_irq(irq); | 80 | irq_move_masked_irq(idata); |
| 76 | if (!masked) | 81 | if (!masked) |
| 77 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 82 | idata->chip->irq_unmask(idata); |
| 83 | } | ||
| 84 | |||
| 85 | void move_native_irq(int irq) | ||
| 86 | { | ||
| 87 | irq_move_irq(irq_get_irq_data(irq)); | ||
| 78 | } | 88 | } |
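
irq_move_masked_irq() and the new irq_move_irq() take an irq_data pointer, so chip callbacks can pass along the cookie they already hold instead of looking the descriptor up by number. A sketch of an ->irq_ack() using the new form, with the hardware access left hypothetical:

        #include <linux/irq.h>

        static void my_hw_ack(unsigned int irq);        /* hypothetical register write */

        static void my_ack(struct irq_data *data)
        {
                /* apply a pending affinity change; no-op in the common case */
                irq_move_irq(data);

                my_hw_ack(data->irq);
        }
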
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index d6bfb89cce91..f76fc00c9877 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | * During system-wide suspend or hibernation device drivers need to be prevented | 18 | * During system-wide suspend or hibernation device drivers need to be prevented |
| 19 | * from receiving interrupts and this function is provided for this purpose. | 19 | * from receiving interrupts and this function is provided for this purpose. |
| 20 | * It marks all interrupt lines in use, except for the timer ones, as disabled | 20 | * It marks all interrupt lines in use, except for the timer ones, as disabled |
| 21 | * and sets the IRQ_SUSPENDED flag for each of them. | 21 | * and sets the IRQS_SUSPENDED flag for each of them. |
| 22 | */ | 22 | */ |
| 23 | void suspend_device_irqs(void) | 23 | void suspend_device_irqs(void) |
| 24 | { | 24 | { |
| @@ -34,7 +34,7 @@ void suspend_device_irqs(void) | |||
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | for_each_irq_desc(irq, desc) | 36 | for_each_irq_desc(irq, desc) |
| 37 | if (desc->status & IRQ_SUSPENDED) | 37 | if (desc->istate & IRQS_SUSPENDED) |
| 38 | synchronize_irq(irq); | 38 | synchronize_irq(irq); |
| 39 | } | 39 | } |
| 40 | EXPORT_SYMBOL_GPL(suspend_device_irqs); | 40 | EXPORT_SYMBOL_GPL(suspend_device_irqs); |
| @@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(suspend_device_irqs); | |||
| 43 | * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs() | 43 | * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs() |
| 44 | * | 44 | * |
| 45 | * Enable all interrupt lines previously disabled by suspend_device_irqs() that | 45 | * Enable all interrupt lines previously disabled by suspend_device_irqs() that |
| 46 | * have the IRQ_SUSPENDED flag set. | 46 | * have the IRQS_SUSPENDED flag set. |
| 47 | */ | 47 | */ |
| 48 | void resume_device_irqs(void) | 48 | void resume_device_irqs(void) |
| 49 | { | 49 | { |
| @@ -68,9 +68,24 @@ int check_wakeup_irqs(void) | |||
| 68 | struct irq_desc *desc; | 68 | struct irq_desc *desc; |
| 69 | int irq; | 69 | int irq; |
| 70 | 70 | ||
| 71 | for_each_irq_desc(irq, desc) | 71 | for_each_irq_desc(irq, desc) { |
| 72 | if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING)) | 72 | if (irqd_is_wakeup_set(&desc->irq_data)) { |
| 73 | return -EBUSY; | 73 | if (desc->istate & IRQS_PENDING) |
| 74 | return -EBUSY; | ||
| 75 | continue; | ||
| 76 | } | ||
| 77 | /* | ||
| 78 | * Check whether the non-wakeup interrupts need | ||
| 79 | * to be masked before finally going into suspend | ||
| 80 | * state. That's for hardware which has no wakeup | ||
| 81 | * source configuration facility. The chip | ||
| 82 | * implementation indicates that with | ||
| 83 | * IRQCHIP_MASK_ON_SUSPEND. | ||
| 84 | */ | ||
| 85 | if (desc->istate & IRQS_SUSPENDED && | ||
| 86 | irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND) | ||
| 87 | mask_irq(desc); | ||
| 88 | } | ||
| 74 | 89 | ||
| 75 | return 0; | 90 | return 0; |
| 76 | } | 91 | } |
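
check_wakeup_irqs() now masks suspended non-wakeup interrupts for hardware that lacks a wakeup-source configuration facility, keyed off IRQCHIP_MASK_ON_SUSPEND. Opting in is presumably a one-line chip flag, as in this sketch with hypothetical mask/unmask callbacks:

        static void my_mask(struct irq_data *d);        /* hypothetical callbacks */
        static void my_unmask(struct irq_data *d);

        static struct irq_chip my_chip = {
                .name           = "mychip",
                .irq_mask       = my_mask,
                .irq_unmask     = my_unmask,
                .flags          = IRQCHIP_MASK_ON_SUSPEND,
        };
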
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 6c8a2a9f8a7b..4cc2e5ed0bec 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
| 12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
| 13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
| 14 | #include <linux/kernel_stat.h> | ||
| 14 | 15 | ||
| 15 | #include "internals.h" | 16 | #include "internals.h" |
| 16 | 17 | ||
| @@ -24,7 +25,7 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v) | |||
| 24 | const struct cpumask *mask = desc->irq_data.affinity; | 25 | const struct cpumask *mask = desc->irq_data.affinity; |
| 25 | 26 | ||
| 26 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 27 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 27 | if (desc->status & IRQ_MOVE_PENDING) | 28 | if (irqd_is_setaffinity_pending(&desc->irq_data)) |
| 28 | mask = desc->pending_mask; | 29 | mask = desc->pending_mask; |
| 29 | #endif | 30 | #endif |
| 30 | seq_cpumask(m, mask); | 31 | seq_cpumask(m, mask); |
| @@ -65,8 +66,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
| 65 | cpumask_var_t new_value; | 66 | cpumask_var_t new_value; |
| 66 | int err; | 67 | int err; |
| 67 | 68 | ||
| 68 | if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity || | 69 | if (!irq_can_set_affinity(irq) || no_irq_affinity) |
| 69 | irq_balancing_disabled(irq)) | ||
| 70 | return -EIO; | 70 | return -EIO; |
| 71 | 71 | ||
| 72 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) | 72 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) |
| @@ -89,7 +89,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
| 89 | if (!cpumask_intersects(new_value, cpu_online_mask)) { | 89 | if (!cpumask_intersects(new_value, cpu_online_mask)) { |
| 90 | /* Special case for empty set - allow the architecture | 90 | /* Special case for empty set - allow the architecture |
| 91 | code to set default SMP affinity. */ | 91 | code to set default SMP affinity. */ |
| 92 | err = irq_select_affinity_usr(irq) ? -EINVAL : count; | 92 | err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count; |
| 93 | } else { | 93 | } else { |
| 94 | irq_set_affinity(irq, new_value); | 94 | irq_set_affinity(irq, new_value); |
| 95 | err = count; | 95 | err = count; |
| @@ -357,3 +357,65 @@ void init_irq_proc(void) | |||
| 357 | } | 357 | } |
| 358 | } | 358 | } |
| 359 | 359 | ||
| 360 | #ifdef CONFIG_GENERIC_IRQ_SHOW | ||
| 361 | |||
| 362 | int __weak arch_show_interrupts(struct seq_file *p, int prec) | ||
| 363 | { | ||
| 364 | return 0; | ||
| 365 | } | ||
| 366 | |||
| 367 | int show_interrupts(struct seq_file *p, void *v) | ||
| 368 | { | ||
| 369 | static int prec; | ||
| 370 | |||
| 371 | unsigned long flags, any_count = 0; | ||
| 372 | int i = *(loff_t *) v, j; | ||
| 373 | struct irqaction *action; | ||
| 374 | struct irq_desc *desc; | ||
| 375 | |||
| 376 | if (i > nr_irqs) | ||
| 377 | return 0; | ||
| 378 | |||
| 379 | if (i == nr_irqs) | ||
| 380 | return arch_show_interrupts(p, prec); | ||
| 381 | |||
| 382 | /* print header and calculate the width of the first column */ | ||
| 383 | if (i == 0) { | ||
| 384 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
| 385 | j *= 10; | ||
| 386 | |||
| 387 | seq_printf(p, "%*s", prec + 8, ""); | ||
| 388 | for_each_online_cpu(j) | ||
| 389 | seq_printf(p, "CPU%-8d", j); | ||
| 390 | seq_putc(p, '\n'); | ||
| 391 | } | ||
| 392 | |||
| 393 | desc = irq_to_desc(i); | ||
| 394 | if (!desc) | ||
| 395 | return 0; | ||
| 396 | |||
| 397 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 398 | for_each_online_cpu(j) | ||
| 399 | any_count |= kstat_irqs_cpu(i, j); | ||
| 400 | action = desc->action; | ||
| 401 | if (!action && !any_count) | ||
| 402 | goto out; | ||
| 403 | |||
| 404 | seq_printf(p, "%*d: ", prec, i); | ||
| 405 | for_each_online_cpu(j) | ||
| 406 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
| 407 | seq_printf(p, " %8s", desc->irq_data.chip->name); | ||
| 408 | seq_printf(p, "-%-8s", desc->name); | ||
| 409 | |||
| 410 | if (action) { | ||
| 411 | seq_printf(p, " %s", action->name); | ||
| 412 | while ((action = action->next) != NULL) | ||
| 413 | seq_printf(p, ", %s", action->name); | ||
| 414 | } | ||
| 415 | |||
| 416 | seq_putc(p, '\n'); | ||
| 417 | out: | ||
| 418 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 419 | return 0; | ||
| 420 | } | ||
| 421 | #endif | ||
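
show_interrupts() delegates the architecture-specific summary rows to arch_show_interrupts(), defaulting to the __weak stub above. An override would append its own lines after the per-IRQ table, roughly as in this sketch; the NMI counter is a stand-in, not a real symbol:

        #include <linux/cpumask.h>
        #include <linux/seq_file.h>

        static unsigned int nmi_count(int cpu);         /* hypothetical per-cpu stat */

        int arch_show_interrupts(struct seq_file *p, int prec)
        {
                int j;

                seq_printf(p, "%*s: ", prec, "NMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", nmi_count(j));
                seq_puts(p, "  Non-maskable interrupts\n");
                return 0;
        }
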
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index dc49358b73fa..ad683a99b1ec 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
| @@ -55,20 +55,19 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); | |||
| 55 | */ | 55 | */ |
| 56 | void check_irq_resend(struct irq_desc *desc, unsigned int irq) | 56 | void check_irq_resend(struct irq_desc *desc, unsigned int irq) |
| 57 | { | 57 | { |
| 58 | unsigned int status = desc->status; | ||
| 59 | |||
| 60 | /* | ||
| 61 | * Make sure the interrupt is enabled, before resending it: | ||
| 62 | */ | ||
| 63 | desc->irq_data.chip->irq_enable(&desc->irq_data); | ||
| 64 | |||
| 65 | /* | 58 | /* |
| 66 | * We do not resend level type interrupts. Level type | 59 | * We do not resend level type interrupts. Level type |
| 67 | * interrupts are resent by hardware when they are still | 60 | * interrupts are resent by hardware when they are still |
| 68 | * active. | 61 | * active. |
| 69 | */ | 62 | */ |
| 70 | if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { | 63 | if (irq_settings_is_level(desc)) |
| 71 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; | 64 | return; |
| 65 | if (desc->istate & IRQS_REPLAY) | ||
| 66 | return; | ||
| 67 | if (desc->istate & IRQS_PENDING) { | ||
| 68 | irq_compat_clr_pending(desc); | ||
| 69 | desc->istate &= ~IRQS_PENDING; | ||
| 70 | desc->istate |= IRQS_REPLAY; | ||
| 72 | 71 | ||
| 73 | if (!desc->irq_data.chip->irq_retrigger || | 72 | if (!desc->irq_data.chip->irq_retrigger || |
| 74 | !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { | 73 | !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { |
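
check_irq_resend() still prefers a hardware retrigger and falls back to the tasklet-based software resend only when the chip lacks ->irq_retrigger() or it reports failure. A chip-side retrigger might look like this sketch, with the register layout invented:

        #include <linux/bitops.h>
        #include <linux/io.h>
        #include <linux/irq.h>

        #define MY_SWINT_SET    0x10                    /* hypothetical register offset */
        static void __iomem *my_base;

        static int my_retrigger(struct irq_data *data)
        {
                /* software-set the pending bit so the line fires again */
                writel(BIT(data->irq % 32), my_base + MY_SWINT_SET);
                return 1;       /* non-zero: hardware handles the resend */
        }
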
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h new file mode 100644 index 000000000000..0227ad358272 --- /dev/null +++ b/kernel/irq/settings.h | |||
| @@ -0,0 +1,138 @@ | |||
| 1 | /* | ||
| 2 | * Internal header to deal with irq_desc->status which will be renamed | ||
| 3 | * to irq_desc->settings. | ||
| 4 | */ | ||
| 5 | enum { | ||
| 6 | _IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS, | ||
| 7 | _IRQ_PER_CPU = IRQ_PER_CPU, | ||
| 8 | _IRQ_LEVEL = IRQ_LEVEL, | ||
| 9 | _IRQ_NOPROBE = IRQ_NOPROBE, | ||
| 10 | _IRQ_NOREQUEST = IRQ_NOREQUEST, | ||
| 11 | _IRQ_NOAUTOEN = IRQ_NOAUTOEN, | ||
| 12 | _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, | ||
| 13 | _IRQ_NO_BALANCING = IRQ_NO_BALANCING, | ||
| 14 | _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, | ||
| 15 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, | ||
| 16 | }; | ||
| 17 | |||
| 18 | #define IRQ_INPROGRESS GOT_YOU_MORON | ||
| 19 | #define IRQ_REPLAY GOT_YOU_MORON | ||
| 20 | #define IRQ_WAITING GOT_YOU_MORON | ||
| 21 | #define IRQ_DISABLED GOT_YOU_MORON | ||
| 22 | #define IRQ_PENDING GOT_YOU_MORON | ||
| 23 | #define IRQ_MASKED GOT_YOU_MORON | ||
| 24 | #define IRQ_WAKEUP GOT_YOU_MORON | ||
| 25 | #define IRQ_MOVE_PENDING GOT_YOU_MORON | ||
| 26 | #define IRQ_PER_CPU GOT_YOU_MORON | ||
| 27 | #define IRQ_NO_BALANCING GOT_YOU_MORON | ||
| 28 | #define IRQ_AFFINITY_SET GOT_YOU_MORON | ||
| 29 | #define IRQ_LEVEL GOT_YOU_MORON | ||
| 30 | #define IRQ_NOPROBE GOT_YOU_MORON | ||
| 31 | #define IRQ_NOREQUEST GOT_YOU_MORON | ||
| 32 | #define IRQ_NOAUTOEN GOT_YOU_MORON | ||
| 33 | #define IRQ_NESTED_THREAD GOT_YOU_MORON | ||
| 34 | #undef IRQF_MODIFY_MASK | ||
| 35 | #define IRQF_MODIFY_MASK GOT_YOU_MORON | ||
| 36 | |||
| 37 | static inline void | ||
| 38 | irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) | ||
| 39 | { | ||
| 40 | desc->status &= ~(clr & _IRQF_MODIFY_MASK); | ||
| 41 | desc->status |= (set & _IRQF_MODIFY_MASK); | ||
| 42 | } | ||
| 43 | |||
| 44 | static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) | ||
| 45 | { | ||
| 46 | return desc->status & _IRQ_PER_CPU; | ||
| 47 | } | ||
| 48 | |||
| 49 | static inline void irq_settings_set_per_cpu(struct irq_desc *desc) | ||
| 50 | { | ||
| 51 | desc->status |= _IRQ_PER_CPU; | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline void irq_settings_set_no_balancing(struct irq_desc *desc) | ||
| 55 | { | ||
| 56 | desc->status |= _IRQ_NO_BALANCING; | ||
| 57 | } | ||
| 58 | |||
| 59 | static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) | ||
| 60 | { | ||
| 61 | return desc->status & _IRQ_NO_BALANCING; | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) | ||
| 65 | { | ||
| 66 | return desc->status & IRQ_TYPE_SENSE_MASK; | ||
| 67 | } | ||
| 68 | |||
| 69 | static inline void | ||
| 70 | irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) | ||
| 71 | { | ||
| 72 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | ||
| 73 | desc->status |= mask & IRQ_TYPE_SENSE_MASK; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline bool irq_settings_is_level(struct irq_desc *desc) | ||
| 77 | { | ||
| 78 | return desc->status & _IRQ_LEVEL; | ||
| 79 | } | ||
| 80 | |||
| 81 | static inline void irq_settings_clr_level(struct irq_desc *desc) | ||
| 82 | { | ||
| 83 | desc->status &= ~_IRQ_LEVEL; | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline void irq_settings_set_level(struct irq_desc *desc) | ||
| 87 | { | ||
| 88 | desc->status |= _IRQ_LEVEL; | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline bool irq_settings_can_request(struct irq_desc *desc) | ||
| 92 | { | ||
| 93 | return !(desc->status & _IRQ_NOREQUEST); | ||
| 94 | } | ||
| 95 | |||
| 96 | static inline void irq_settings_clr_norequest(struct irq_desc *desc) | ||
| 97 | { | ||
| 98 | desc->status &= ~_IRQ_NOREQUEST; | ||
| 99 | } | ||
| 100 | |||
| 101 | static inline void irq_settings_set_norequest(struct irq_desc *desc) | ||
| 102 | { | ||
| 103 | desc->status |= _IRQ_NOREQUEST; | ||
| 104 | } | ||
| 105 | |||
| 106 | static inline bool irq_settings_can_probe(struct irq_desc *desc) | ||
| 107 | { | ||
| 108 | return !(desc->status & _IRQ_NOPROBE); | ||
| 109 | } | ||
| 110 | |||
| 111 | static inline void irq_settings_clr_noprobe(struct irq_desc *desc) | ||
| 112 | { | ||
| 113 | desc->status &= ~_IRQ_NOPROBE; | ||
| 114 | } | ||
| 115 | |||
| 116 | static inline void irq_settings_set_noprobe(struct irq_desc *desc) | ||
| 117 | { | ||
| 118 | desc->status |= _IRQ_NOPROBE; | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) | ||
| 122 | { | ||
| 123 | return desc->status & _IRQ_MOVE_PCNTXT; | ||
| 124 | } | ||
| 125 | |||
| 126 | static inline bool irq_settings_can_autoenable(struct irq_desc *desc) | ||
| 127 | { | ||
| 128 | return !(desc->status & _IRQ_NOAUTOEN); | ||
| 129 | } | ||
| 130 | |||
| 131 | static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) | ||
| 132 | { | ||
| 133 | return desc->status & _IRQ_NESTED_THREAD; | ||
| 134 | } | ||
| 135 | |||
| 136 | /* Nothing should touch desc->status from now on */ | ||
| 137 | #undef status | ||
| 138 | #define status USE_THE_PROPER_WRAPPERS_YOU_MORON | ||
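
The GOT_YOU_MORON poisoning turns any leftover open-coded flag access into a compile error, forcing conversions to the accessors, as seen in the manage.c hunks above. The before/after shape of such a conversion:

        /* old style: breaks the build once settings.h is included */
        if (desc->status & IRQ_NOREQUEST)
                return -EINVAL;

        /* new style: the accessor keeps the storage details private */
        if (!irq_settings_can_request(desc))
                return -EINVAL;
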
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 3089d3b9d5f3..dd586ebf9c8c 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
| @@ -21,70 +21,94 @@ static int irqfixup __read_mostly; | |||
| 21 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) | 21 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) |
| 22 | static void poll_spurious_irqs(unsigned long dummy); | 22 | static void poll_spurious_irqs(unsigned long dummy); |
| 23 | static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); | 23 | static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); |
| 24 | static int irq_poll_cpu; | ||
| 25 | static atomic_t irq_poll_active; | ||
| 26 | |||
| 27 | /* | ||
| 28 | * We wait here for a poller to finish. | ||
| 29 | * | ||
| 30 | * If the poll runs on this CPU, then we yell loudly and return | ||
| 31 | * false. That will leave the interrupt line disabled in the worst | ||
| 32 | * case, but it should never happen. | ||
| 33 | * | ||
| 34 | * We wait until the poller is done and then recheck disabled and | ||
| 35 | * action (about to be disabled). Only if it's still active, we return | ||
| 36 | * true and let the handler run. | ||
| 37 | */ | ||
| 38 | bool irq_wait_for_poll(struct irq_desc *desc) | ||
| 39 | { | ||
| 40 | if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), | ||
| 41 | "irq poll in progress on cpu %d for irq %d\n", | ||
| 42 | smp_processor_id(), desc->irq_data.irq)) | ||
| 43 | return false; | ||
| 44 | |||
| 45 | #ifdef CONFIG_SMP | ||
| 46 | do { | ||
| 47 | raw_spin_unlock(&desc->lock); | ||
| 48 | while (desc->istate & IRQS_INPROGRESS) | ||
| 49 | cpu_relax(); | ||
| 50 | raw_spin_lock(&desc->lock); | ||
| 51 | } while (desc->istate & IRQS_INPROGRESS); | ||
| 52 | /* Might have been disabled in meantime */ | ||
| 53 | return !(desc->istate & IRQS_DISABLED) && desc->action; | ||
| 54 | #else | ||
| 55 | return false; | ||
| 56 | #endif | ||
| 57 | } | ||
| 58 | |||
| 24 | 59 | ||
| 25 | /* | 60 | /* |
| 26 | * Recovery handler for misrouted interrupts. | 61 | * Recovery handler for misrouted interrupts. |
| 27 | */ | 62 | */ |
| 28 | static int try_one_irq(int irq, struct irq_desc *desc) | 63 | static int try_one_irq(int irq, struct irq_desc *desc, bool force) |
| 29 | { | 64 | { |
| 65 | irqreturn_t ret = IRQ_NONE; | ||
| 30 | struct irqaction *action; | 66 | struct irqaction *action; |
| 31 | int ok = 0, work = 0; | ||
| 32 | 67 | ||
| 33 | raw_spin_lock(&desc->lock); | 68 | raw_spin_lock(&desc->lock); |
| 34 | /* Already running on another processor */ | ||
| 35 | if (desc->status & IRQ_INPROGRESS) { | ||
| 36 | /* | ||
| 37 | * Already running: If it is shared get the other | ||
| 38 | * CPU to go looking for our mystery interrupt too | ||
| 39 | */ | ||
| 40 | if (desc->action && (desc->action->flags & IRQF_SHARED)) | ||
| 41 | desc->status |= IRQ_PENDING; | ||
| 42 | raw_spin_unlock(&desc->lock); | ||
| 43 | return ok; | ||
| 44 | } | ||
| 45 | /* Honour the normal IRQ locking */ | ||
| 46 | desc->status |= IRQ_INPROGRESS; | ||
| 47 | action = desc->action; | ||
| 48 | raw_spin_unlock(&desc->lock); | ||
| 49 | 69 | ||
| 50 | while (action) { | 70 | /* PER_CPU and nested thread interrupts are never polled */ |
| 51 | /* Only shared IRQ handlers are safe to call */ | 71 | if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) |
| 52 | if (action->flags & IRQF_SHARED) { | 72 | goto out; |
| 53 | if (action->handler(irq, action->dev_id) == | ||
| 54 | IRQ_HANDLED) | ||
| 55 | ok = 1; | ||
| 56 | } | ||
| 57 | action = action->next; | ||
| 58 | } | ||
| 59 | local_irq_disable(); | ||
| 60 | /* Now clean up the flags */ | ||
| 61 | raw_spin_lock(&desc->lock); | ||
| 62 | action = desc->action; | ||
| 63 | 73 | ||
| 64 | /* | 74 | /* |
| 65 | * While we were looking for a fixup someone queued a real | 75 | * Do not poll disabled interrupts unless the spurious |
| 66 | * IRQ clashing with our walk: | 76 | * disabled poller asks explicitely. |
| 67 | */ | 77 | */ |
| 68 | while ((desc->status & IRQ_PENDING) && action) { | 78 | if ((desc->istate & IRQS_DISABLED) && !force) |
| 79 | goto out; | ||
| 80 | |||
| 81 | /* | ||
| 82 | * All handlers must agree on IRQF_SHARED, so we test just the | ||
| 83 | * first. Check for action->next as well. | ||
| 84 | */ | ||
| 85 | action = desc->action; | ||
| 86 | if (!action || !(action->flags & IRQF_SHARED) || | ||
| 87 | (action->flags & __IRQF_TIMER) || !action->next) | ||
| 88 | goto out; | ||
| 89 | |||
| 90 | /* Already running on another processor */ | ||
| 91 | if (desc->istate & IRQS_INPROGRESS) { | ||
| 69 | /* | 92 | /* |
| 70 | * Perform real IRQ processing for the IRQ we deferred | 93 | * Already running: If it is shared get the other |
| 94 | * CPU to go looking for our mystery interrupt too | ||
| 71 | */ | 95 | */ |
| 72 | work = 1; | 96 | irq_compat_set_pending(desc); |
| 73 | raw_spin_unlock(&desc->lock); | 97 | desc->istate |= IRQS_PENDING; |
| 74 | handle_IRQ_event(irq, action); | 98 | goto out; |
| 75 | raw_spin_lock(&desc->lock); | ||
| 76 | desc->status &= ~IRQ_PENDING; | ||
| 77 | } | 99 | } |
| 78 | desc->status &= ~IRQ_INPROGRESS; | ||
| 79 | /* | ||
| 80 | * If we did actual work for the real IRQ line we must let the | ||
| 81 | * IRQ controller clean up too | ||
| 82 | */ | ||
| 83 | if (work) | ||
| 84 | irq_end(irq, desc); | ||
| 85 | raw_spin_unlock(&desc->lock); | ||
| 86 | 100 | ||
| 87 | return ok; | 101 | /* Mark it poll in progress */ |
| 102 | desc->istate |= IRQS_POLL_INPROGRESS; | ||
| 103 | do { | ||
| 104 | if (handle_irq_event(desc) == IRQ_HANDLED) | ||
| 105 | ret = IRQ_HANDLED; | ||
| 106 | action = desc->action; | ||
| 107 | } while ((desc->istate & IRQS_PENDING) && action); | ||
| 108 | desc->istate &= ~IRQS_POLL_INPROGRESS; | ||
| 109 | out: | ||
| 110 | raw_spin_unlock(&desc->lock); | ||
| 111 | return ret == IRQ_HANDLED; | ||
| 88 | } | 112 | } |
| 89 | 113 | ||
| 90 | static int misrouted_irq(int irq) | 114 | static int misrouted_irq(int irq) |
| @@ -92,6 +116,11 @@ static int misrouted_irq(int irq) | |||
| 92 | struct irq_desc *desc; | 116 | struct irq_desc *desc; |
| 93 | int i, ok = 0; | 117 | int i, ok = 0; |
| 94 | 118 | ||
| 119 | if (atomic_inc_return(&irq_poll_active) != 1) | ||
| 120 | goto out; | ||
| 121 | |||
| 122 | irq_poll_cpu = smp_processor_id(); | ||
| 123 | |||
| 95 | for_each_irq_desc(i, desc) { | 124 | for_each_irq_desc(i, desc) { |
| 96 | if (!i) | 125 | if (!i) |
| 97 | continue; | 126 | continue; |
| @@ -99,9 +128,11 @@ static int misrouted_irq(int irq) | |||
| 99 | if (i == irq) /* Already tried */ | 128 | if (i == irq) /* Already tried */ |
| 100 | continue; | 129 | continue; |
| 101 | 130 | ||
| 102 | if (try_one_irq(i, desc)) | 131 | if (try_one_irq(i, desc, false)) |
| 103 | ok = 1; | 132 | ok = 1; |
| 104 | } | 133 | } |
| 134 | out: | ||
| 135 | atomic_dec(&irq_poll_active); | ||
| 105 | /* So the caller can adjust the irq error counts */ | 136 | /* So the caller can adjust the irq error counts */ |
| 106 | return ok; | 137 | return ok; |
| 107 | } | 138 | } |
| @@ -111,23 +142,28 @@ static void poll_spurious_irqs(unsigned long dummy) | |||
| 111 | struct irq_desc *desc; | 142 | struct irq_desc *desc; |
| 112 | int i; | 143 | int i; |
| 113 | 144 | ||
| 145 | if (atomic_inc_return(&irq_poll_active) != 1) | ||
| 146 | goto out; | ||
| 147 | irq_poll_cpu = smp_processor_id(); | ||
| 148 | |||
| 114 | for_each_irq_desc(i, desc) { | 149 | for_each_irq_desc(i, desc) { |
| 115 | unsigned int status; | 150 | unsigned int state; |
| 116 | 151 | ||
| 117 | if (!i) | 152 | if (!i) |
| 118 | continue; | 153 | continue; |
| 119 | 154 | ||
| 120 | /* Racy but it doesn't matter */ | 155 | /* Racy but it doesn't matter */ |
| 121 | status = desc->status; | 156 | state = desc->istate; |
| 122 | barrier(); | 157 | barrier(); |
| 123 | if (!(status & IRQ_SPURIOUS_DISABLED)) | 158 | if (!(state & IRQS_SPURIOUS_DISABLED)) |
| 124 | continue; | 159 | continue; |
| 125 | 160 | ||
| 126 | local_irq_disable(); | 161 | local_irq_disable(); |
| 127 | try_one_irq(i, desc); | 162 | try_one_irq(i, desc, true); |
| 128 | local_irq_enable(); | 163 | local_irq_enable(); |
| 129 | } | 164 | } |
| 130 | 165 | out: | |
| 166 | atomic_dec(&irq_poll_active); | ||
| 131 | mod_timer(&poll_spurious_irq_timer, | 167 | mod_timer(&poll_spurious_irq_timer, |
| 132 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 168 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
| 133 | } | 169 | } |
| @@ -139,15 +175,13 @@ static void poll_spurious_irqs(unsigned long dummy) | |||
| 139 | * | 175 | * |
| 140 | * (The other 100-of-100,000 interrupts may have been a correctly | 176 | * (The other 100-of-100,000 interrupts may have been a correctly |
| 141 | * functioning device sharing an IRQ with the failing one) | 177 | * functioning device sharing an IRQ with the failing one) |
| 142 | * | ||
| 143 | * Called under desc->lock | ||
| 144 | */ | 178 | */ |
| 145 | |||
| 146 | static void | 179 | static void |
| 147 | __report_bad_irq(unsigned int irq, struct irq_desc *desc, | 180 | __report_bad_irq(unsigned int irq, struct irq_desc *desc, |
| 148 | irqreturn_t action_ret) | 181 | irqreturn_t action_ret) |
| 149 | { | 182 | { |
| 150 | struct irqaction *action; | 183 | struct irqaction *action; |
| 184 | unsigned long flags; | ||
| 151 | 185 | ||
| 152 | if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { | 186 | if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { |
| 153 | printk(KERN_ERR "irq event %d: bogus return value %x\n", | 187 | printk(KERN_ERR "irq event %d: bogus return value %x\n", |
| @@ -159,6 +193,13 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc, | |||
| 159 | dump_stack(); | 193 | dump_stack(); |
| 160 | printk(KERN_ERR "handlers:\n"); | 194 | printk(KERN_ERR "handlers:\n"); |
| 161 | 195 | ||
| 196 | /* | ||
| 197 | * We need to take desc->lock here. note_interrupt() is called | ||
| 198 | * w/o desc->lock held, but IRQS_INPROGRESS set. We might race | ||
| 199 | * with something else removing an action. It's ok to take | ||
| 200 | * desc->lock here. See synchronize_irq(). | ||
| 201 | */ | ||
| 202 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 162 | action = desc->action; | 203 | action = desc->action; |
| 163 | while (action) { | 204 | while (action) { |
| 164 | printk(KERN_ERR "[<%p>]", action->handler); | 205 | printk(KERN_ERR "[<%p>]", action->handler); |
| @@ -167,6 +208,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc, | |||
| 167 | printk("\n"); | 208 | printk("\n"); |
| 168 | action = action->next; | 209 | action = action->next; |
| 169 | } | 210 | } |
| 211 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 170 | } | 212 | } |
| 171 | 213 | ||
| 172 | static void | 214 | static void |
| @@ -218,6 +260,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc, | |||
| 218 | void note_interrupt(unsigned int irq, struct irq_desc *desc, | 260 | void note_interrupt(unsigned int irq, struct irq_desc *desc, |
| 219 | irqreturn_t action_ret) | 261 | irqreturn_t action_ret) |
| 220 | { | 262 | { |
| 263 | if (desc->istate & IRQS_POLL_INPROGRESS) | ||
| 264 | return; | ||
| 265 | |||
| 221 | if (unlikely(action_ret != IRQ_HANDLED)) { | 266 | if (unlikely(action_ret != IRQ_HANDLED)) { |
| 222 | /* | 267 | /* |
| 223 | * If we are seeing only the odd spurious IRQ caused by | 268 | * If we are seeing only the odd spurious IRQ caused by |
| @@ -254,9 +299,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, | |||
| 254 | * Now kill the IRQ | 299 | * Now kill the IRQ |
| 255 | */ | 300 | */ |
| 256 | printk(KERN_EMERG "Disabling IRQ #%d\n", irq); | 301 | printk(KERN_EMERG "Disabling IRQ #%d\n", irq); |
| 257 | desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; | 302 | desc->istate |= IRQS_SPURIOUS_DISABLED; |
| 258 | desc->depth++; | 303 | desc->depth++; |
| 259 | desc->irq_data.chip->irq_disable(&desc->irq_data); | 304 | irq_disable(desc); |
| 260 | 305 | ||
| 261 | mod_timer(&poll_spurious_irq_timer, | 306 | mod_timer(&poll_spurious_irq_timer, |
| 262 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 307 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
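
Both pollers now serialize on irq_poll_active so only one CPU walks the descriptors at a time, while irq_wait_for_poll() lets the regular handler path wait a poll out. The exclusion idiom in isolation, as a minimal sketch rather than the kernel's exact code:

        #include <asm/atomic.h>

        static atomic_t poll_active;

        static void poll_once(void)
        {
                /* the first caller sees 1 and proceeds; concurrent callers back off */
                if (atomic_inc_return(&poll_active) != 1)
                        goto out;

                /* ... single-threaded walk over the irq descriptors ... */
        out:
                atomic_dec(&poll_active);
        }
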
diff --git a/kernel/sched.c b/kernel/sched.c index 27125e413576..c8e40b7005c0 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -2286,7 +2286,10 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) | |||
| 2286 | * yield - it could be a while. | 2286 | * yield - it could be a while. |
| 2287 | */ | 2287 | */ |
| 2288 | if (unlikely(on_rq)) { | 2288 | if (unlikely(on_rq)) { |
| 2289 | schedule_timeout_uninterruptible(1); | 2289 | ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); |
| 2290 | |||
| 2291 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
| 2292 | schedule_hrtimeout(&to, HRTIMER_MODE_REL); | ||
| 2290 | continue; | 2293 | continue; |
| 2291 | } | 2294 | } |
| 2292 | 2295 | ||
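
wait_task_inactive() swaps schedule_timeout_uninterruptible(1) for an hrtimer-based sleep of the same nominal length, so the wait is no longer rounded up to whole jiffies. The same pattern suits any short, precise in-kernel wait; foo_done() below is a placeholder predicate:

        #include <linux/hrtimer.h>
        #include <linux/ktime.h>
        #include <linux/sched.h>

        static void wait_briefly_until(bool (*foo_done)(void))
        {
                while (!foo_done()) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC / HZ);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_hrtimeout(&to, HRTIMER_MODE_REL);
                }
        }
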
diff --git a/kernel/softirq.c b/kernel/softirq.c index 0cee50487629..56e5dec837f0 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -311,9 +311,21 @@ void irq_enter(void) | |||
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED | 313 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED |
| 314 | # define invoke_softirq() __do_softirq() | 314 | static inline void invoke_softirq(void) |
| 315 | { | ||
| 316 | if (!force_irqthreads) | ||
| 317 | __do_softirq(); | ||
| 318 | else | ||
| 319 | wakeup_softirqd(); | ||
| 320 | } | ||
| 315 | #else | 321 | #else |
| 316 | # define invoke_softirq() do_softirq() | 322 | static inline void invoke_softirq(void) |
| 323 | { | ||
| 324 | if (!force_irqthreads) | ||
| 325 | do_softirq(); | ||
| 326 | else | ||
| 327 | wakeup_softirqd(); | ||
| 328 | } | ||
| 317 | #endif | 329 | #endif |
| 318 | 330 | ||
| 319 | /* | 331 | /* |
| @@ -737,7 +749,10 @@ static int run_ksoftirqd(void * __bind_cpu) | |||
| 737 | don't process */ | 749 | don't process */ |
| 738 | if (cpu_is_offline((long)__bind_cpu)) | 750 | if (cpu_is_offline((long)__bind_cpu)) |
| 739 | goto wait_to_die; | 751 | goto wait_to_die; |
| 740 | do_softirq(); | 752 | local_irq_disable(); |
| 753 | if (local_softirq_pending()) | ||
| 754 | __do_softirq(); | ||
| 755 | local_irq_enable(); | ||
| 741 | preempt_enable_no_resched(); | 756 | preempt_enable_no_resched(); |
| 742 | cond_resched(); | 757 | cond_resched(); |
| 743 | preempt_disable(); | 758 | preempt_disable(); |
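
The softirq changes key off force_irqthreads: when forced threading is enabled, irq_exit() and ksoftirqd keep softirq work in thread context instead of running it inline. The flag itself is presumably set from the boot command line along these lines (a sketch of the expected wiring):

        #include <linux/init.h>

        #ifdef CONFIG_IRQ_FORCED_THREADING
        __read_mostly bool force_irqthreads;

        static int __init setup_forced_irqthreads(char *arg)
        {
                force_irqthreads = true;
                return 0;
        }
        early_param("threadirqs", setup_forced_irqthreads);
        #endif
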
