diff options
| -rw-r--r-- | kernel/irq/handle.c | 49 |
1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 266f7986aa08..76d5a671bfe1 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/kernel_stat.h> | 19 | #include <linux/kernel_stat.h> |
| 20 | #include <linux/rculist.h> | 20 | #include <linux/rculist.h> |
| 21 | #include <linux/hash.h> | 21 | #include <linux/hash.h> |
| 22 | #include <linux/radix-tree.h> | ||
| 22 | #include <trace/events/irq.h> | 23 | #include <trace/events/irq.h> |
| 23 | 24 | ||
| 24 | #include "internals.h" | 25 | #include "internals.h" |
| @@ -127,7 +128,26 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) | |||
| 127 | */ | 128 | */ |
| 128 | DEFINE_RAW_SPINLOCK(sparse_irq_lock); | 129 | DEFINE_RAW_SPINLOCK(sparse_irq_lock); |
| 129 | 130 | ||
| 130 | static struct irq_desc **irq_desc_ptrs __read_mostly; | 131 | static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); |
| 132 | |||
| 133 | static void set_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
| 134 | { | ||
| 135 | radix_tree_insert(&irq_desc_tree, irq, desc); | ||
| 136 | } | ||
| 137 | |||
| 138 | struct irq_desc *irq_to_desc(unsigned int irq) | ||
| 139 | { | ||
| 140 | return radix_tree_lookup(&irq_desc_tree, irq); | ||
| 141 | } | ||
| 142 | |||
| 143 | void replace_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
| 144 | { | ||
| 145 | void **ptr; | ||
| 146 | |||
| 147 | ptr = radix_tree_lookup_slot(&irq_desc_tree, irq); | ||
| 148 | if (ptr) | ||
| 149 | radix_tree_replace_slot(ptr, desc); | ||
| 150 | } | ||
| 131 | 151 | ||
| 132 | static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { | 152 | static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { |
| 133 | [0 ... NR_IRQS_LEGACY-1] = { | 153 | [0 ... NR_IRQS_LEGACY-1] = { |
| @@ -159,9 +179,6 @@ int __init early_irq_init(void) | |||
| 159 | legacy_count = ARRAY_SIZE(irq_desc_legacy); | 179 | legacy_count = ARRAY_SIZE(irq_desc_legacy); |
| 160 | node = first_online_node; | 180 | node = first_online_node; |
| 161 | 181 | ||
| 162 | /* allocate irq_desc_ptrs array based on nr_irqs */ | ||
| 163 | irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT); | ||
| 164 | |||
| 165 | /* allocate based on nr_cpu_ids */ | 182 | /* allocate based on nr_cpu_ids */ |
| 166 | kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * | 183 | kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * |
| 167 | sizeof(int), GFP_NOWAIT, node); | 184 | sizeof(int), GFP_NOWAIT, node); |
| @@ -175,28 +192,12 @@ int __init early_irq_init(void) | |||
| 175 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 192 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
| 176 | alloc_desc_masks(&desc[i], node, true); | 193 | alloc_desc_masks(&desc[i], node, true); |
| 177 | init_desc_masks(&desc[i]); | 194 | init_desc_masks(&desc[i]); |
| 178 | irq_desc_ptrs[i] = desc + i; | 195 | set_irq_desc(i, &desc[i]); |
| 179 | } | 196 | } |
| 180 | 197 | ||
| 181 | for (i = legacy_count; i < nr_irqs; i++) | ||
| 182 | irq_desc_ptrs[i] = NULL; | ||
| 183 | |||
| 184 | return arch_early_irq_init(); | 198 | return arch_early_irq_init(); |
| 185 | } | 199 | } |
| 186 | 200 | ||
| 187 | struct irq_desc *irq_to_desc(unsigned int irq) | ||
| 188 | { | ||
| 189 | if (irq_desc_ptrs && irq < nr_irqs) | ||
| 190 | return irq_desc_ptrs[irq]; | ||
| 191 | |||
| 192 | return NULL; | ||
| 193 | } | ||
| 194 | |||
| 195 | void replace_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
| 196 | { | ||
| 197 | irq_desc_ptrs[irq] = desc; | ||
| 198 | } | ||
| 199 | |||
| 200 | struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) | 201 | struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) |
| 201 | { | 202 | { |
| 202 | struct irq_desc *desc; | 203 | struct irq_desc *desc; |
| @@ -208,14 +209,14 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) | |||
| 208 | return NULL; | 209 | return NULL; |
| 209 | } | 210 | } |
| 210 | 211 | ||
| 211 | desc = irq_desc_ptrs[irq]; | 212 | desc = irq_to_desc(irq); |
| 212 | if (desc) | 213 | if (desc) |
| 213 | return desc; | 214 | return desc; |
| 214 | 215 | ||
| 215 | raw_spin_lock_irqsave(&sparse_irq_lock, flags); | 216 | raw_spin_lock_irqsave(&sparse_irq_lock, flags); |
| 216 | 217 | ||
| 217 | /* We have to check it to avoid races with another CPU */ | 218 | /* We have to check it to avoid races with another CPU */ |
| 218 | desc = irq_desc_ptrs[irq]; | 219 | desc = irq_to_desc(irq); |
| 219 | if (desc) | 220 | if (desc) |
| 220 | goto out_unlock; | 221 | goto out_unlock; |
| 221 | 222 | ||
| @@ -228,7 +229,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) | |||
| 228 | } | 229 | } |
| 229 | init_one_irq_desc(irq, desc, node); | 230 | init_one_irq_desc(irq, desc, node); |
| 230 | 231 | ||
| 231 | irq_desc_ptrs[irq] = desc; | 232 | set_irq_desc(irq, desc); |
| 232 | 233 | ||
| 233 | out_unlock: | 234 | out_unlock: |
| 234 | raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); | 235 | raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); |
