Diffstat (limited to 'kernel/irq/irqdomain.c')
 kernel/irq/irqdomain.c | 579 +++++++++++++++---------------------------------
 1 file changed, 182 insertions(+), 397 deletions(-)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 1ed8dff17eb9..2d7cd3428365 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -23,9 +23,11 @@ static DEFINE_MUTEX(revmap_trees_mutex);
 static struct irq_domain *irq_default_domain;
 
 /**
- * irq_domain_alloc() - Allocate a new irq_domain data structure
+ * __irq_domain_add() - Allocate a new irq_domain data structure
  * @of_node: optional device-tree node of the interrupt controller
- * @revmap_type: type of reverse mapping to use
+ * @size: Size of linear map; 0 for radix mapping only
+ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
+ *              direct mapping
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  *
@@ -33,41 +35,35 @@ static struct irq_domain *irq_default_domain;
  * register allocated irq_domain with irq_domain_register(). Returns pointer
  * to IRQ domain, or NULL on failure.
  */
-static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
-					   unsigned int revmap_type,
+struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+				    irq_hw_number_t hwirq_max, int direct_max,
 				    const struct irq_domain_ops *ops,
 				    void *host_data)
 {
 	struct irq_domain *domain;
 
-	domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
-			      of_node_to_nid(of_node));
+	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
+			      GFP_KERNEL, of_node_to_nid(of_node));
 	if (WARN_ON(!domain))
 		return NULL;
 
 	/* Fill structure */
-	domain->revmap_type = revmap_type;
+	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->of_node = of_node_get(of_node);
+	domain->hwirq_max = hwirq_max;
+	domain->revmap_size = size;
+	domain->revmap_direct_max_irq = direct_max;
 
-	return domain;
-}
-
-static void irq_domain_free(struct irq_domain *domain)
-{
-	of_node_put(domain->of_node);
-	kfree(domain);
-}
-
-static void irq_domain_add(struct irq_domain *domain)
-{
 	mutex_lock(&irq_domain_mutex);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
-	pr_debug("Allocated domain of type %d @0x%p\n",
-		 domain->revmap_type, domain);
+
+	pr_debug("Added domain %s\n", domain->name);
+	return domain;
 }
+EXPORT_SYMBOL_GPL(__irq_domain_add);
 
 /**
  * irq_domain_remove() - Remove an irq domain.
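
Note: the kzalloc_node() sizing of sizeof(*domain) + sizeof(unsigned int) * size works because the linear reverse map is now a flexible array member at the tail of struct irq_domain, replacing the old revmap_type/revmap_data union. A trimmed sketch of the resulting layout (paraphrased; see include/linux/irqdomain.h in this tree for the authoritative definition):

	struct irq_domain {
		struct list_head link;
		const char *name;
		const struct irq_domain_ops *ops;
		void *host_data;
		struct device_node *of_node;	/* optional */

		/* reverse map data, replacing the revmap_data union */
		irq_hw_number_t hwirq_max;		/* first out-of-range hwirq */
		unsigned int revmap_direct_max_irq;	/* hwirq == virq window */
		unsigned int revmap_size;		/* length of linear_revmap[] */
		struct radix_tree_root revmap_tree;	/* for hwirq >= revmap_size */
		unsigned int linear_revmap[];		/* allocated inline above */
	};

One allocation now covers every former revmap type: a pure radix-tree domain passes size == 0, a pure linear domain passes hwirq_max == size, and a direct-map domain reserves the [0..direct_max) window.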
@@ -81,29 +77,12 @@ void irq_domain_remove(struct irq_domain *domain)
 {
 	mutex_lock(&irq_domain_mutex);
 
-	switch (domain->revmap_type) {
-	case IRQ_DOMAIN_MAP_LEGACY:
-		/*
-		 * Legacy domains don't manage their own irq_desc
-		 * allocations, we expect the caller to handle irq_desc
-		 * freeing on their own.
-		 */
-		break;
-	case IRQ_DOMAIN_MAP_TREE:
-		/*
-		 * radix_tree_delete() takes care of destroying the root
-		 * node when all entries are removed. Shout if there are
-		 * any mappings left.
-		 */
-		WARN_ON(domain->revmap_data.tree.height);
-		break;
-	case IRQ_DOMAIN_MAP_LINEAR:
-		kfree(domain->revmap_data.linear.revmap);
-		domain->revmap_data.linear.size = 0;
-		break;
-	case IRQ_DOMAIN_MAP_NOMAP:
-		break;
-	}
+	/*
+	 * radix_tree_delete() takes care of destroying the root
+	 * node when all entries are removed. Shout if there are
+	 * any mappings left.
+	 */
+	WARN_ON(domain->revmap_tree.height);
 
 	list_del(&domain->link);
 
@@ -115,44 +94,30 @@ void irq_domain_remove(struct irq_domain *domain)
 
 	mutex_unlock(&irq_domain_mutex);
 
-	pr_debug("Removed domain of type %d @0x%p\n",
-		 domain->revmap_type, domain);
+	pr_debug("Removed domain %s\n", domain->name);
 
-	irq_domain_free(domain);
+	of_node_put(domain->of_node);
+	kfree(domain);
 }
 EXPORT_SYMBOL_GPL(irq_domain_remove);
 
-static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
-					     irq_hw_number_t hwirq)
-{
-	irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
-	int size = domain->revmap_data.legacy.size;
-
-	if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
-		return 0;
-	return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
-}
-
 /**
- * irq_domain_add_simple() - Allocate and register a simple irq_domain.
+ * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
  * @of_node: pointer to interrupt controller's device tree node.
  * @size: total number of irqs in mapping
  * @first_irq: first number of irq block assigned to the domain,
- *	pass zero to assign irqs on-the-fly. This will result in a
- *	linear IRQ domain so it is important to use irq_create_mapping()
- *	for each used IRQ, especially when SPARSE_IRQ is enabled.
+ *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
+ *	pre-map all of the irqs in the domain to virqs starting at first_irq.
  * @ops: map/unmap domain callbacks
  * @host_data: Controller private data pointer
  *
- * Allocates a legacy irq_domain if irq_base is positive or a linear
- * domain otherwise. For the legacy domain, IRQ descriptors will also
- * be allocated.
+ * Allocates an irq_domain, and optionally if first_irq is positive then also
+ * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
  *
  * This is intended to implement the expected behaviour for most
- * interrupt controllers which is that a linear mapping should
- * normally be used unless the system requires a legacy mapping in
- * order to support supplying interrupt numbers during non-DT
- * registration of devices.
+ * interrupt controllers. If device tree is used, then first_irq will be 0 and
+ * irqs get mapped dynamically on the fly. However, if the controller requires
+ * static virq assignments (non-DT boot) then it will set that up correctly.
  */
 struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 					 unsigned int size,
@@ -160,33 +125,25 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
-	if (first_irq > 0) {
-		int irq_base;
+	struct irq_domain *domain;
+
+	domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
+	if (!domain)
+		return NULL;
 
+	if (first_irq > 0) {
 		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
-			/*
-			 * Set the descriptor allocator to search for a
-			 * 1-to-1 mapping, such as irq_alloc_desc_at().
-			 * Use of_node_to_nid() which is defined to
-			 * numa_node_id() on platforms that have no custom
-			 * implementation.
-			 */
-			irq_base = irq_alloc_descs(first_irq, first_irq, size,
-						   of_node_to_nid(of_node));
-			if (irq_base < 0) {
+			/* attempt to allocated irq_descs */
+			int rc = irq_alloc_descs(first_irq, first_irq, size,
+						 of_node_to_nid(of_node));
+			if (rc < 0)
 				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 					first_irq);
-				irq_base = first_irq;
-			}
-		} else
-			irq_base = first_irq;
-
-		return irq_domain_add_legacy(of_node, size, irq_base, 0,
-					     ops, host_data);
+		}
+		irq_domain_associate_many(domain, first_irq, 0, size);
 	}
 
-	/* A linear domain is the default */
-	return irq_domain_add_linear(of_node, size, ops, host_data);
+	return domain;
 }
 EXPORT_SYMBOL_GPL(irq_domain_add_simple);
 
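Note: a typical caller after this change might look like the following (hypothetical driver; the foo_* names, np and FOO_* constants are illustrative, not part of this commit):

	static const struct irq_domain_ops foo_irq_ops = {
		.map   = foo_irq_map,		/* hypothetical map callback */
		.xlate = irq_domain_xlate_onecell,
	};

	/* DT boot: first_irq == 0; virqs are created on demand later,
	 * e.g. by irq_create_of_mapping() when devices are probed. */
	domain = irq_domain_add_simple(np, FOO_NR_IRQS, 0,
				       &foo_irq_ops, NULL);

	/* Non-DT boot: first_irq > 0; irq_descs are allocated here (with
	 * CONFIG_SPARSE_IRQ) and hwirqs 0..FOO_NR_IRQS-1 are pre-mapped
	 * to virqs starting at FOO_IRQ_BASE via irq_domain_associate_many(). */
	domain = irq_domain_add_simple(NULL, FOO_NR_IRQS, FOO_IRQ_BASE,
				       &foo_irq_ops, NULL);
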
@@ -213,131 +170,19 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 					 void *host_data)
 {
 	struct irq_domain *domain;
-	unsigned int i;
 
-	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
+	domain = __irq_domain_add(of_node, first_hwirq + size,
+				  first_hwirq + size, 0, ops, host_data);
 	if (!domain)
 		return NULL;
 
-	domain->revmap_data.legacy.first_irq = first_irq;
-	domain->revmap_data.legacy.first_hwirq = first_hwirq;
-	domain->revmap_data.legacy.size = size;
+	irq_domain_associate_many(domain, first_irq, first_hwirq, size);
 
-	mutex_lock(&irq_domain_mutex);
-	/* Verify that all the irqs are available */
-	for (i = 0; i < size; i++) {
-		int irq = first_irq + i;
-		struct irq_data *irq_data = irq_get_irq_data(irq);
-
-		if (WARN_ON(!irq_data || irq_data->domain)) {
-			mutex_unlock(&irq_domain_mutex);
-			irq_domain_free(domain);
-			return NULL;
-		}
-	}
-
-	/* Claim all of the irqs before registering a legacy domain */
-	for (i = 0; i < size; i++) {
-		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
-		irq_data->hwirq = first_hwirq + i;
-		irq_data->domain = domain;
-	}
-	mutex_unlock(&irq_domain_mutex);
-
-	for (i = 0; i < size; i++) {
-		int irq = first_irq + i;
-		int hwirq = first_hwirq + i;
-
-		/* IRQ0 gets ignored */
-		if (!irq)
-			continue;
-
-		/* Legacy flags are left to default at this point,
-		 * one can then use irq_create_mapping() to
-		 * explicitly change them
-		 */
-		if (ops->map)
-			ops->map(domain, irq, hwirq);
-
-		/* Clear norequest flags */
-		irq_clear_status_flags(irq, IRQ_NOREQUEST);
-	}
-
-	irq_domain_add(domain);
 	return domain;
 }
 EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
- * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
- * @of_node: pointer to interrupt controller's device tree node.
- * @size: Number of interrupts in the domain.
- * @ops: map/unmap domain callbacks
- * @host_data: Controller private data pointer
- */
-struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
-					 unsigned int size,
-					 const struct irq_domain_ops *ops,
-					 void *host_data)
-{
-	struct irq_domain *domain;
-	unsigned int *revmap;
-
-	revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
-			      of_node_to_nid(of_node));
-	if (WARN_ON(!revmap))
-		return NULL;
-
-	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
-	if (!domain) {
-		kfree(revmap);
-		return NULL;
-	}
-	domain->revmap_data.linear.size = size;
-	domain->revmap_data.linear.revmap = revmap;
-	irq_domain_add(domain);
-	return domain;
-}
-EXPORT_SYMBOL_GPL(irq_domain_add_linear);
-
-struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
-					unsigned int max_irq,
-					const struct irq_domain_ops *ops,
-					void *host_data)
-{
-	struct irq_domain *domain = irq_domain_alloc(of_node,
-					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
-	if (domain) {
-		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
-		irq_domain_add(domain);
-	}
-	return domain;
-}
-EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
-
-/**
- * irq_domain_add_tree()
- * @of_node: pointer to interrupt controller's device tree node.
- * @ops: map/unmap domain callbacks
- *
- * Note: The radix tree will be allocated later during boot automatically
- * (the reverse mapping will use the slow path until that happens).
- */
-struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
-				       const struct irq_domain_ops *ops,
-				       void *host_data)
-{
-	struct irq_domain *domain = irq_domain_alloc(of_node,
-					IRQ_DOMAIN_MAP_TREE, ops, host_data);
-	if (domain) {
-		INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
-		irq_domain_add(domain);
-	}
-	return domain;
-}
-EXPORT_SYMBOL_GPL(irq_domain_add_tree);
-
-/**
+/**
  * irq_find_host() - Locates a domain for a given device node
  * @node: device-tree node of the interrupt controller
  */
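
Note: irq_domain_add_legacy() is now a thin wrapper: a legacy domain is simply a linear domain covering hwirqs 0..first_hwirq+size-1 with the block pre-associated. The irq_domain_add_linear(), irq_domain_add_nomap() and irq_domain_add_tree() constructors removed above survive as static inlines around __irq_domain_add() in include/linux/irqdomain.h, roughly as follows (paraphrased sketch, not part of this file):

	static inline struct irq_domain *irq_domain_add_linear(
		struct device_node *of_node, unsigned int size,
		const struct irq_domain_ops *ops, void *host_data)
	{
		return __irq_domain_add(of_node, size, size, 0, ops, host_data);
	}

	static inline struct irq_domain *irq_domain_add_nomap(
		struct device_node *of_node, unsigned int max_irq,
		const struct irq_domain_ops *ops, void *host_data)
	{
		return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data);
	}

	static inline struct irq_domain *irq_domain_add_tree(
		struct device_node *of_node,
		const struct irq_domain_ops *ops, void *host_data)
	{
		return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data);
	}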
@@ -385,125 +230,108 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
-static void irq_domain_disassociate_many(struct irq_domain *domain,
-					 unsigned int irq_base, int count)
+static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 {
-	/*
-	 * disassociate in reverse order;
-	 * not strictly necessary, but nice for unwinding
-	 */
-	while (count--) {
-		int irq = irq_base + count;
-		struct irq_data *irq_data = irq_get_irq_data(irq);
-		irq_hw_number_t hwirq;
+	struct irq_data *irq_data = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq;
 
-		if (WARN_ON(!irq_data || irq_data->domain != domain))
-			continue;
+	if (WARN(!irq_data || irq_data->domain != domain,
+		 "virq%i doesn't exist; cannot disassociate\n", irq))
+		return;
 
-		hwirq = irq_data->hwirq;
-		irq_set_status_flags(irq, IRQ_NOREQUEST);
+	hwirq = irq_data->hwirq;
+	irq_set_status_flags(irq, IRQ_NOREQUEST);
 
-		/* remove chip and handler */
-		irq_set_chip_and_handler(irq, NULL, NULL);
+	/* remove chip and handler */
+	irq_set_chip_and_handler(irq, NULL, NULL);
 
-		/* Make sure it's completed */
-		synchronize_irq(irq);
+	/* Make sure it's completed */
+	synchronize_irq(irq);
 
-		/* Tell the PIC about it */
-		if (domain->ops->unmap)
-			domain->ops->unmap(domain, irq);
-		smp_mb();
+	/* Tell the PIC about it */
+	if (domain->ops->unmap)
+		domain->ops->unmap(domain, irq);
+	smp_mb();
 
-		irq_data->domain = NULL;
-		irq_data->hwirq = 0;
+	irq_data->domain = NULL;
+	irq_data->hwirq = 0;
 
-		/* Clear reverse map */
-		switch(domain->revmap_type) {
-		case IRQ_DOMAIN_MAP_LINEAR:
-			if (hwirq < domain->revmap_data.linear.size)
-				domain->revmap_data.linear.revmap[hwirq] = 0;
-			break;
-		case IRQ_DOMAIN_MAP_TREE:
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_data.tree, hwirq);
-			mutex_unlock(&revmap_trees_mutex);
-			break;
-		}
+	/* Clear reverse map for this hwirq */
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = 0;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
 	}
 }
 
-int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
-			      irq_hw_number_t hwirq_base, int count)
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+			 irq_hw_number_t hwirq)
 {
-	unsigned int virq = irq_base;
-	irq_hw_number_t hwirq = hwirq_base;
-	int i, ret;
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+	int ret;
 
-	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
-		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+	if (WARN(hwirq >= domain->hwirq_max,
+		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
+		return -EINVAL;
+	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
+		return -EINVAL;
+	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
+		return -EINVAL;
 
-	for (i = 0; i < count; i++) {
-		struct irq_data *irq_data = irq_get_irq_data(virq + i);
-
-		if (WARN(!irq_data, "error: irq_desc not allocated; "
-			"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
-			return -EINVAL;
-		if (WARN(irq_data->domain, "error: irq_desc already associated; "
-			"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
-			return -EINVAL;
-	};
-
-	for (i = 0; i < count; i++, virq++, hwirq++) {
-		struct irq_data *irq_data = irq_get_irq_data(virq);
-
-		irq_data->hwirq = hwirq;
-		irq_data->domain = domain;
-		if (domain->ops->map) {
-			ret = domain->ops->map(domain, virq, hwirq);
-			if (ret != 0) {
-				/*
-				 * If map() returns -EPERM, this interrupt is protected
-				 * by the firmware or some other service and shall not
-				 * be mapped.
-				 *
-				 * Since on some platforms we blindly try to map everything
-				 * we end up with a log full of backtraces.
-				 *
-				 * So instead, we silently fail on -EPERM, it is the
-				 * responsibility of the PIC driver to display a relevant
-				 * message if needed.
-				 */
-				if (ret != -EPERM) {
-					pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
-					       virq, hwirq, ret);
-					WARN_ON(1);
-				}
-				irq_data->domain = NULL;
-				irq_data->hwirq = 0;
-				goto err_unmap;
+	mutex_lock(&irq_domain_mutex);
+	irq_data->hwirq = hwirq;
+	irq_data->domain = domain;
+	if (domain->ops->map) {
+		ret = domain->ops->map(domain, virq, hwirq);
+		if (ret != 0) {
+			/*
+			 * If map() returns -EPERM, this interrupt is protected
+			 * by the firmware or some other service and shall not
+			 * be mapped. Don't bother telling the user about it.
+			 */
+			if (ret != -EPERM) {
+				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
+					domain->name, hwirq, virq, ret);
 			}
+			irq_data->domain = NULL;
+			irq_data->hwirq = 0;
+			mutex_unlock(&irq_domain_mutex);
+			return ret;
 		}
 
-		switch (domain->revmap_type) {
-		case IRQ_DOMAIN_MAP_LINEAR:
-			if (hwirq < domain->revmap_data.linear.size)
-				domain->revmap_data.linear.revmap[hwirq] = virq;
-			break;
-		case IRQ_DOMAIN_MAP_TREE:
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
-			mutex_unlock(&revmap_trees_mutex);
-			break;
-		}
+		/* If not already assigned, give the domain the chip's name */
+		if (!domain->name && irq_data->chip)
+			domain->name = irq_data->chip->name;
+	}
 
-		irq_clear_status_flags(virq, IRQ_NOREQUEST);
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = virq;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
 	}
+	mutex_unlock(&irq_domain_mutex);
+
+	irq_clear_status_flags(virq, IRQ_NOREQUEST);
 
 	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_associate);
 
-err_unmap:
-	irq_domain_disassociate_many(domain, irq_base, i);
-	return -EINVAL;
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+			       irq_hw_number_t hwirq_base, int count)
+{
+	int i;
+
+	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
+		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+
+	for (i = 0; i < count; i++) {
+		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
+	}
 }
 EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 
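Note: the quiet -EPERM path above is meant for controllers where firmware owns some interrupt lines. A sketch of a map() callback that relies on it (hypothetical driver; the foo_* names and the secure_lines bitmap are illustrative, not part of this commit):

	struct foo_chip_data {				/* illustrative */
		DECLARE_BITMAP(secure_lines, 32);	/* lines owned by firmware */
	};

	static struct irq_chip foo_irq_chip;		/* details omitted */

	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
			       irq_hw_number_t hwirq)
	{
		struct foo_chip_data *data = d->host_data;

		/* Firmware-owned line: refuse quietly. The core sees -EPERM,
		 * undoes the association and skips the pr_info() above. */
		if (test_bit(hwirq, data->secure_lines))
			return -EPERM;

		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
		irq_set_chip_data(virq, data);
		return 0;
	}
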
@@ -513,7 +341,9 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
  *
  * This routine is used for irq controllers which can choose the hardware
  * interrupt numbers they generate. In such a case it's simplest to use
- * the linux irq as the hardware interrupt number.
+ * the linux irq as the hardware interrupt number. It still uses the linear
+ * or radix tree to store the mapping, but the irq controller can optimize
+ * the revmap path by using the hwirq directly.
  */
 unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 {
@@ -522,17 +352,14 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 	if (domain == NULL)
 		domain = irq_default_domain;
 
-	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
-		return 0;
-
 	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
 	if (!virq) {
 		pr_debug("create_direct virq allocation failed\n");
 		return 0;
 	}
-	if (virq >= domain->revmap_data.nomap.max_irq) {
+	if (virq >= domain->revmap_direct_max_irq) {
 		pr_err("ERROR: no free irqs available below %i maximum\n",
-		       domain->revmap_data.nomap.max_irq);
+		       domain->revmap_direct_max_irq);
 		irq_free_desc(virq);
 		return 0;
 	}
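
Note: with the revmap-type check gone, irq_create_direct_mapping() works on any domain whose revmap_direct_max_irq is non-zero; a domain created with direct_max == 0 now just fails the bounds check above instead of tripping a WARN_ON. Assuming the irq_domain_add_nomap() wrapper from the header, usage stays as before (illustrative values; foo_irq_ops as in the earlier sketch):

	/* reserve a direct window of 128 virqs; inside it hwirq == virq */
	domain = irq_domain_add_nomap(NULL, 128, &foo_irq_ops, NULL);

	virq = irq_create_direct_mapping(domain);
	if (!virq)
		return -ENOSPC;	/* no free desc below revmap_direct_max_irq */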
@@ -569,9 +396,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 	if (domain == NULL)
 		domain = irq_default_domain;
 	if (domain == NULL) {
-		pr_warning("irq_create_mapping called for"
-			   " NULL domain, hwirq=%lx\n", hwirq);
-		WARN_ON(1);
+		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
 		return 0;
 	}
 	pr_debug("-> using domain @%p\n", domain);
@@ -583,10 +408,6 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 		return virq;
 	}
 
-	/* Get a virtual interrupt number */
-	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
-		return irq_domain_legacy_revmap(domain, hwirq);
-
 	/* Allocate a virtual interrupt number */
 	hint = hwirq % nr_irqs;
 	if (hint == 0)
@@ -639,12 +460,7 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 	if (unlikely(ret < 0))
 		return ret;
 
-	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
-	if (unlikely(ret < 0)) {
-		irq_free_descs(irq_base, count);
-		return ret;
-	}
-
+	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
@@ -671,8 +487,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 	if (intsize > 0)
 		return intspec[0];
 #endif
-	pr_warning("no irq domain found for %s !\n",
-		   of_node_full_name(controller));
+	pr_warn("no irq domain found for %s !\n",
+		of_node_full_name(controller));
 	return 0;
 }
 
@@ -714,11 +530,7 @@ void irq_dispose_mapping(unsigned int virq)
 	if (WARN_ON(domain == NULL))
 		return;
 
-	/* Never unmap legacy interrupts */
-	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
-		return;
-
-	irq_domain_disassociate_many(domain, virq, 1);
+	irq_domain_disassociate(domain, virq);
 	irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
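
Note: because every mapping is now torn down through the common irq_domain_disassociate(), a create/dispose pair behaves identically no matter how the domain was created; formerly "legacy" mappings are no longer silently skipped. Sketch (domain and hwirq as in the earlier examples):

	unsigned int virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return -EINVAL;
	/* ... request_irq()/free_irq() ... */
	irq_dispose_mapping(virq);	/* now also tears down pre-mapped ranges */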
@@ -739,63 +551,51 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 	if (domain == NULL)
 		return 0;
 
-	switch (domain->revmap_type) {
-	case IRQ_DOMAIN_MAP_LEGACY:
-		return irq_domain_legacy_revmap(domain, hwirq);
-	case IRQ_DOMAIN_MAP_LINEAR:
-		return irq_linear_revmap(domain, hwirq);
-	case IRQ_DOMAIN_MAP_TREE:
-		rcu_read_lock();
-		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
-		rcu_read_unlock();
-		if (data)
-			return data->irq;
-		break;
-	case IRQ_DOMAIN_MAP_NOMAP:
+	if (hwirq < domain->revmap_direct_max_irq) {
 		data = irq_get_irq_data(hwirq);
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
 			return hwirq;
-		break;
 	}
 
-	return 0;
-}
-EXPORT_SYMBOL_GPL(irq_find_mapping);
+	/* Check if the hwirq is in the linear revmap. */
+	if (hwirq < domain->revmap_size)
+		return domain->linear_revmap[hwirq];
 
-/**
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
- *
- * This is a fast path that can be called directly by irq controller code to
- * save a handful of instructions.
- */
-unsigned int irq_linear_revmap(struct irq_domain *domain,
-			       irq_hw_number_t hwirq)
-{
-	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
-
-	/* Check revmap bounds; complain if exceeded */
-	if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
-		return 0;
-
-	return domain->revmap_data.linear.revmap[hwirq];
+	rcu_read_lock();
+	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
+	rcu_read_unlock();
+	return data ? data->irq : 0;
 }
-EXPORT_SYMBOL_GPL(irq_linear_revmap);
+EXPORT_SYMBOL_GPL(irq_find_mapping);
 
 #ifdef CONFIG_IRQ_DOMAIN_DEBUG
 static int virq_debug_show(struct seq_file *m, void *private)
 {
 	unsigned long flags;
 	struct irq_desc *desc;
-	const char *p;
-	static const char none[] = "none";
-	void *data;
+	struct irq_domain *domain;
+	struct radix_tree_iter iter;
+	void *data, **slot;
 	int i;
 
-	seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
+	seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
+		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(domain, &irq_domain_list, link) {
+		int count = 0;
+		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
+			count++;
+		seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
+			   domain == irq_default_domain ? '*' : ' ', domain->name,
+			   domain->revmap_size + count, domain->revmap_size,
+			   domain->revmap_direct_max_irq,
+			   domain->of_node ? of_node_full_name(domain->of_node) : "");
+	}
+	mutex_unlock(&irq_domain_mutex);
+
+	seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
 		   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
-		   "domain name");
+		   "active", "type", "domain");
 
 	for (i = 1; i < nr_irqs; i++) {
 		desc = irq_to_desc(i);
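
Note: irq_linear_revmap() has not disappeared; it moved to include/linux/irqdomain.h as a static inline over the new fields, roughly (paraphrased sketch):

	static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
						     irq_hw_number_t hwirq)
	{
		return hwirq < domain->revmap_size ?
			domain->linear_revmap[hwirq] : 0;
	}

irq_find_mapping() above now tries the three strategies in a fixed order for every domain: the hwirq == virq direct window, then the linear array, then the radix tree.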
@@ -803,28 +603,28 @@ static int virq_debug_show(struct seq_file *m, void *private)
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
+		domain = desc->irq_data.domain;
 
-		if (desc->action && desc->action->handler) {
+		if (domain) {
 			struct irq_chip *chip;
+			int hwirq = desc->irq_data.hwirq;
+			bool direct;
 
 			seq_printf(m, "%5d ", i);
-			seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);
+			seq_printf(m, "0x%05x ", hwirq);
 
 			chip = irq_desc_get_chip(desc);
-			if (chip && chip->name)
-				p = chip->name;
-			else
-				p = none;
-			seq_printf(m, "%-15s ", p);
+			seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
 
 			data = irq_desc_get_chip_data(desc);
 			seq_printf(m, data ? "0x%p " : " %p ", data);
 
-			if (desc->irq_data.domain)
-				p = of_node_full_name(desc->irq_data.domain->of_node);
-			else
-				p = none;
-			seq_printf(m, "%s\n", p);
+			seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
+			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
+			seq_printf(m, "%6s%-8s ",
+				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
+				   direct ? "(DIRECT)" : "");
+			seq_printf(m, "%s\n", desc->irq_data.domain->name);
 		}
 
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -921,18 +721,3 @@ const struct irq_domain_ops irq_domain_simple_ops = {
 	.xlate = irq_domain_xlate_onetwocell,
 };
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
-
-#ifdef CONFIG_OF_IRQ
-void irq_domain_generate_simple(const struct of_device_id *match,
-				u64 phys_base, unsigned int irq_start)
-{
-	struct device_node *node;
-	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
-		 (unsigned long long) phys_base, (int) irq_start);
-	node = of_find_matching_node_by_address(NULL, match, phys_base);
-	if (node)
-		irq_domain_add_legacy(node, 32, irq_start, 0,
-				      &irq_domain_simple_ops, NULL);
-}
-EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
-#endif