author		Mark Salter <msalter@redhat.com>	2012-01-26 09:26:21 -0500
committer	Grant Likely <grant.likely@secretlab.ca>	2012-02-16 08:11:24 -0500
commit		0bd761e1b6d909d3fd08841be7d5035f9fde8a53 (patch)
tree		9ea9032e6259e2a6fd495e75db7755cdb92cef3e /arch
parent		a18dc81bf58258ac0920bec26b91656cb0140d2a (diff)
irq_domain/c6x: Convert c6x to use generic irq_domain support.
The C6X IRQ support was copied almost verbatim from the PowerPC virtual IRQ
code. The PowerPC code was used as the basis for generic irq_domain support,
so this patch mostly copies what was done to arch/powerpc by Grant Likely
in his irq_domain patch series.
Signed-off-by: Mark Salter <msalter@redhat.com>
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
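
[Reviewer note, not part of the patch] For readers who have not followed the irq_domain series, the shape of the conversion is roughly the sketch below. These are illustrative fragments only; my_map, my_xlate, my_pic and nr_hwirqs are placeholder names, and the real hunks follow after the diffstat.

	/* Before: per-driver "irq_host", copied from arch/powerpc */
	static struct irq_host_ops my_host_ops = {
		.map	= my_map,	/* bind irq_chip/handler to a newly mapped virq */
		.xlate	= my_xlate,	/* decode a device-tree interrupt specifier */
	};

	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, nr_hwirqs,
			      &my_host_ops, 0 /* invalid hwirq */);

	/* After: generic irq_domain keeps the same callbacks, but the
	 * reverse-map bookkeeping moves into kernel/irq/irqdomain.c.
	 */
	static struct irq_domain_ops my_domain_ops = {
		.map	= my_map,
		.xlate	= my_xlate,
	};

	domain = irq_domain_add_linear(np, nr_hwirqs, &my_domain_ops, my_pic);

The core PIC, whose priority interrupts occupy a fixed range of Linux irq numbers, uses irq_domain_add_legacy() instead, as the init_IRQ() hunk below shows.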
Diffstat (limited to 'arch')
-rw-r--r--  arch/c6x/Kconfig                 |   1
-rw-r--r--  arch/c6x/include/asm/irq.h       | 245
-rw-r--r--  arch/c6x/kernel/irq.c            | 611
-rw-r--r--  arch/c6x/platforms/megamod-pic.c |  13
4 files changed, 21 insertions(+), 849 deletions(-)
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 26e67f0f0051..3c64b2894c13 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -12,6 +12,7 @@ config TMS320C6X
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_MEMBLOCK
 	select HAVE_SPARSE_IRQ
+	select IRQ_DOMAIN
 	select OF
 	select OF_EARLY_FLATTREE
 
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
index a6ae3c9d9c40..f13b78d5e1ca 100644
--- a/arch/c6x/include/asm/irq.h
+++ b/arch/c6x/include/asm/irq.h
@@ -13,6 +13,7 @@
 #ifndef _ASM_C6X_IRQ_H
 #define _ASM_C6X_IRQ_H
 
+#include <linux/irqdomain.h>
 #include <linux/threads.h>
 #include <linux/list.h>
 #include <linux/radix-tree.h>
@@ -41,253 +42,9 @@
 /* This number is used when no interrupt has been assigned */
 #define NO_IRQ 0
 
-/* This type is the placeholder for a hardware interrupt number. It has to
- * be big enough to enclose whatever representation is used by a given
- * platform.
- */
-typedef unsigned long irq_hw_number_t;
-
-/* Interrupt controller "host" data structure. This could be defined as a
- * irq domain controller. That is, it handles the mapping between hardware
- * and virtual interrupt numbers for a given interrupt domain. The host
- * structure is generally created by the PIC code for a given PIC instance
- * (though a host can cover more than one PIC if they have a flat number
- * model). It's the host callbacks that are responsible for setting the
- * irq_chip on a given irq_desc after it's been mapped.
- *
- * The host code and data structures are fairly agnostic to the fact that
- * we use an open firmware device-tree. We do have references to struct
- * device_node in two places: in irq_find_host() to find the host matching
- * a given interrupt controller node, and of course as an argument to its
- * counterpart host->ops->match() callback. However, those are treated as
- * generic pointers by the core and the fact that it's actually a device-node
- * pointer is purely a convention between callers and implementation. This
- * code could thus be used on other architectures by replacing those two
- * by some sort of arch-specific void * "token" used to identify interrupt
- * controllers.
- */
-struct irq_host;
-struct radix_tree_root;
-struct device_node;
-
-/* Functions below are provided by the host and called whenever a new mapping
- * is created or an old mapping is disposed. The host can then proceed to
- * whatever internal data structures management is required. It also needs
- * to setup the irq_desc when returning from map().
- */
-struct irq_host_ops {
-	/* Match an interrupt controller device node to a host, returns
-	 * 1 on a match
-	 */
-	int (*match)(struct irq_host *h, struct device_node *node);
-
-	/* Create or update a mapping between a virtual irq number and a hw
-	 * irq number. This is called only once for a given mapping.
-	 */
-	int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
-
-	/* Dispose of such a mapping */
-	void (*unmap)(struct irq_host *h, unsigned int virq);
-
-	/* Translate device-tree interrupt specifier from raw format coming
-	 * from the firmware to a irq_hw_number_t (interrupt line number) and
-	 * type (sense) that can be passed to set_irq_type(). In the absence
-	 * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
-	 * will return the hw number in the first cell and IRQ_TYPE_NONE for
-	 * the type (which amount to keeping whatever default value the
-	 * interrupt controller has for that line)
-	 */
-	int (*xlate)(struct irq_host *h, struct device_node *ctrler,
-		     const u32 *intspec, unsigned int intsize,
-		     irq_hw_number_t *out_hwirq, unsigned int *out_type);
-};
-
-struct irq_host {
-	struct list_head link;
-
-	/* type of reverse mapping technique */
-	unsigned int revmap_type;
-#define IRQ_HOST_MAP_PRIORITY 0 /* core priority irqs, get irqs 1..15 */
-#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
-#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
-#define IRQ_HOST_MAP_TREE 3 /* radix tree */
-	union {
-		struct {
-			unsigned int size;
-			unsigned int *revmap;
-		} linear;
-		struct radix_tree_root tree;
-	} revmap_data;
-	struct irq_host_ops *ops;
-	void *host_data;
-	irq_hw_number_t inval_irq;
-
-	/* Optional device node pointer */
-	struct device_node *of_node;
-};
-
 struct irq_data;
 extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
 extern irq_hw_number_t virq_to_hw(unsigned int virq);
-extern bool virq_is_host(unsigned int virq, struct irq_host *host);
-
-/**
- * irq_alloc_host - Allocate a new irq_host data structure
- * @of_node: optional device-tree node of the interrupt controller
- * @revmap_type: type of reverse mapping to use
- * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
- * @ops: map/unmap host callbacks
- * @inval_irq: provide a hw number in that host space that is always invalid
- *
- * Allocates and initialize and irq_host structure. Note that in the case of
- * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
- * for all legacy interrupts except 0 (which is always the invalid irq for
- * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by
- * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated
- * later during boot automatically (the reverse mapping will use the slow path
- * until that happens).
- */
-extern struct irq_host *irq_alloc_host(struct device_node *of_node,
-				       unsigned int revmap_type,
-				       unsigned int revmap_arg,
-				       struct irq_host_ops *ops,
-				       irq_hw_number_t inval_irq);
-
-
-/**
- * irq_find_host - Locates a host for a given device node
- * @node: device-tree node of the interrupt controller
- */
-extern struct irq_host *irq_find_host(struct device_node *node);
-
-
-/**
- * irq_set_default_host - Set a "default" host
- * @host: default host pointer
- *
- * For convenience, it's possible to set a "default" host that will be used
- * whenever NULL is passed to irq_create_mapping(). It makes life easier for
- * platforms that want to manipulate a few hard coded interrupt numbers that
- * aren't properly represented in the device-tree.
- */
-extern void irq_set_default_host(struct irq_host *host);
-
-
-/**
- * irq_set_virq_count - Set the maximum number of virt irqs
- * @count: number of linux virtual irqs, capped with NR_IRQS
- *
- * This is mainly for use by platforms like iSeries who want to program
- * the virtual irq number in the controller to avoid the reverse mapping
- */
-extern void irq_set_virq_count(unsigned int count);
-
-
-/**
- * irq_create_mapping - Map a hardware interrupt into linux virq space
- * @host: host owning this hardware interrupt or NULL for default host
- * @hwirq: hardware irq number in that host space
- *
- * Only one mapping per hardware interrupt is permitted. Returns a linux
- * virq number.
- * If the sense/trigger is to be specified, set_irq_type() should be called
- * on the number returned from that call.
- */
-extern unsigned int irq_create_mapping(struct irq_host *host,
-				       irq_hw_number_t hwirq);
-
-
-/**
- * irq_dispose_mapping - Unmap an interrupt
- * @virq: linux virq number of the interrupt to unmap
- */
-extern void irq_dispose_mapping(unsigned int virq);
-
-/**
- * irq_find_mapping - Find a linux virq from an hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a slow path, for use by generic code. It's expected that an
- * irq controller implementation directly calls the appropriate low level
- * mapping function.
- */
-extern unsigned int irq_find_mapping(struct irq_host *host,
-				     irq_hw_number_t hwirq);
-
-/**
- * irq_create_direct_mapping - Allocate a virq for direct mapping
- * @host: host to allocate the virq for or NULL for default host
- *
- * This routine is used for irq controllers which can choose the hardware
- * interrupt numbers they generate. In such a case it's simplest to use
- * the linux virq as the hardware interrupt number.
- */
-extern unsigned int irq_create_direct_mapping(struct irq_host *host);
-
-/**
- * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
- * @host: host owning this hardware interrupt
- * @virq: linux irq number
- * @hwirq: hardware irq number in that host space
- *
- * This is for use by irq controllers that use a radix tree reverse
- * mapping for fast lookup.
- */
-extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
-				    irq_hw_number_t hwirq);
-
-/**
- * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a fast path, for use by irq controller code that uses radix tree
- * revmaps
- */
-extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
-					    irq_hw_number_t hwirq);
-
-/**
- * irq_linear_revmap - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a fast path, for use by irq controller code that uses linear
- * revmaps. It does fallback to the slow path if the revmap doesn't exist
- * yet and will create the revmap entry with appropriate locking
- */
-
-extern unsigned int irq_linear_revmap(struct irq_host *host,
-				      irq_hw_number_t hwirq);
-
-
-
-/**
- * irq_alloc_virt - Allocate virtual irq numbers
- * @host: host owning these new virtual irqs
- * @count: number of consecutive numbers to allocate
- * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
- *
- * This is a low level function that is used internally by irq_create_mapping()
- * and that can be used by some irq controllers implementations for things
- * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
- */
-extern unsigned int irq_alloc_virt(struct irq_host *host,
-				   unsigned int count,
-				   unsigned int hint);
-
-/**
- * irq_free_virt - Free virtual irq numbers
- * @virq: virtual irq number of the first interrupt to free
- * @count: number of interrupts to free
- *
- * This function is the opposite of irq_alloc_virt. It will not clear reverse
- * maps, this should be done previously by unmap'ing the interrupt. In fact,
- * all interrupts covered by the range being freed should have been unmapped
- * prior to calling this.
- */
-extern void irq_free_virt(unsigned int virq, unsigned int count);
 
 extern void __init init_pic_c64xplus(void);
 
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
index 0929e4b2b244..3d5ac600a38b 100644
--- a/arch/c6x/kernel/irq.c
+++ b/arch/c6x/kernel/irq.c
@@ -73,10 +73,10 @@ asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
-static struct irq_host *core_host;
+static struct irq_domain *core_domain;
 
-static int core_host_map(struct irq_host *h, unsigned int virq,
+static int core_domain_map(struct irq_domain *h, unsigned int virq,
 			   irq_hw_number_t hw)
 {
 	if (hw < 4 || hw >= NR_PRIORITY_IRQS)
 		return -EINVAL;
@@ -86,8 +86,8 @@ static int core_host_map(struct irq_host *h, unsigned int virq,
 	return 0;
 }
 
-static struct irq_host_ops core_host_ops = {
-	.map = core_host_map,
+static struct irq_domain_ops core_domain_ops = {
+	.map = core_domain_map,
 };
 
 void __init init_IRQ(void)
@@ -100,10 +100,11 @@ void __init init_IRQ(void)
 	np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
 	if (np != NULL) {
 		/* create the core host */
-		core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0,
-					   &core_host_ops, 0);
-		if (core_host)
-			irq_set_default_host(core_host);
+		core_domain = irq_domain_add_legacy(np, NR_PRIORITY_IRQS,
+						    0, 0, &core_domain_ops,
+						    NULL);
+		if (core_domain)
+			irq_set_default_host(core_domain);
 		of_node_put(np);
 	}
 
@@ -128,601 +129,15 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	return 0;
 }
 
-/*
- * IRQ controller and virtual interrupts
- */
-
-/* The main irq map itself is an array of NR_IRQ entries containing the
- * associate host and irq number. An entry with a host of NULL is free.
- * An entry can be allocated if it's free, the allocator always then sets
- * hwirq first to the host's invalid irq number and then fills ops.
- */
-struct irq_map_entry {
-	irq_hw_number_t hwirq;
-	struct irq_host *host;
-};
-
-static LIST_HEAD(irq_hosts);
-static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static DEFINE_MUTEX(revmap_trees_mutex);
-static struct irq_map_entry irq_map[NR_IRQS];
-static unsigned int irq_virq_count = NR_IRQS;
-static struct irq_host *irq_default_host;
-
 irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
-	return irq_map[d->irq].hwirq;
+	return d->hwirq;
 }
 EXPORT_SYMBOL_GPL(irqd_to_hwirq);
 
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
-	return irq_map[virq].hwirq;
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
 }
 EXPORT_SYMBOL_GPL(virq_to_hw);
-
-bool virq_is_host(unsigned int virq, struct irq_host *host)
-{
-	return irq_map[virq].host == host;
-}
-EXPORT_SYMBOL_GPL(virq_is_host);
-
-static int default_irq_host_match(struct irq_host *h, struct device_node *np)
-{
-	return h->of_node != NULL && h->of_node == np;
-}
-
-struct irq_host *irq_alloc_host(struct device_node *of_node,
-				unsigned int revmap_type,
-				unsigned int revmap_arg,
-				struct irq_host_ops *ops,
-				irq_hw_number_t inval_irq)
-{
-	struct irq_host *host;
-	unsigned int size = sizeof(struct irq_host);
-	unsigned int i;
-	unsigned int *rmap;
-	unsigned long flags;
-
-	/* Allocate structure and revmap table if using linear mapping */
-	if (revmap_type == IRQ_HOST_MAP_LINEAR)
-		size += revmap_arg * sizeof(unsigned int);
-	host = kzalloc(size, GFP_KERNEL);
-	if (host == NULL)
-		return NULL;
-
-	/* Fill structure */
-	host->revmap_type = revmap_type;
-	host->inval_irq = inval_irq;
-	host->ops = ops;
-	host->of_node = of_node_get(of_node);
-
-	if (host->ops->match == NULL)
-		host->ops->match = default_irq_host_match;
-
-	raw_spin_lock_irqsave(&irq_big_lock, flags);
-
-	/* Check for the priority controller. */
-	if (revmap_type == IRQ_HOST_MAP_PRIORITY) {
-		if (irq_map[0].host != NULL) {
-			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-			of_node_put(host->of_node);
-			kfree(host);
-			return NULL;
-		}
-		irq_map[0].host = host;
-	}
-
-	list_add(&host->link, &irq_hosts);
-	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-
-	/* Additional setups per revmap type */
-	switch (revmap_type) {
-	case IRQ_HOST_MAP_PRIORITY:
-		/* 0 is always the invalid number for priority */
-		host->inval_irq = 0;
-		/* setup us as the host for all priority interrupts */
-		for (i = 1; i < NR_PRIORITY_IRQS; i++) {
-			irq_map[i].hwirq = i;
-			smp_wmb();
-			irq_map[i].host = host;
-			smp_wmb();
-
-			ops->map(host, i, i);
-		}
-		break;
-	case IRQ_HOST_MAP_LINEAR:
-		rmap = (unsigned int *)(host + 1);
-		for (i = 0; i < revmap_arg; i++)
-			rmap[i] = NO_IRQ;
-		host->revmap_data.linear.size = revmap_arg;
-		smp_wmb();
-		host->revmap_data.linear.revmap = rmap;
-		break;
-	case IRQ_HOST_MAP_TREE:
-		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
-		break;
-	default:
-		break;
-	}
-
-	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
-
-	return host;
-}
-
-struct irq_host *irq_find_host(struct device_node *node)
-{
-	struct irq_host *h, *found = NULL;
-	unsigned long flags;
-
-	/* We might want to match the legacy controller last since
-	 * it might potentially be set to match all interrupts in
-	 * the absence of a device node. This isn't a problem so far
-	 * yet though...
-	 */
-	raw_spin_lock_irqsave(&irq_big_lock, flags);
-	list_for_each_entry(h, &irq_hosts, link)
-		if (h->ops->match(h, node)) {
-			found = h;
-			break;
-		}
-	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-	return found;
-}
-EXPORT_SYMBOL_GPL(irq_find_host);
-
-void irq_set_default_host(struct irq_host *host)
-{
-	pr_debug("irq: Default host set to @0x%p\n", host);
-
-	irq_default_host = host;
-}
-
-void irq_set_virq_count(unsigned int count)
-{
-	pr_debug("irq: Trying to set virq count to %d\n", count);
-
-	BUG_ON(count < NR_PRIORITY_IRQS);
-	if (count < NR_IRQS)
-		irq_virq_count = count;
-}
-
-static int irq_setup_virq(struct irq_host *host, unsigned int virq,
-			  irq_hw_number_t hwirq)
-{
-	int res;
-
-	res = irq_alloc_desc_at(virq, 0);
-	if (res != virq) {
-		pr_debug("irq: -> allocating desc failed\n");
-		goto error;
-	}
-
-	/* map it */
-	smp_wmb();
-	irq_map[virq].hwirq = hwirq;
-	smp_mb();
-
-	if (host->ops->map(host, virq, hwirq)) {
-		pr_debug("irq: -> mapping failed, freeing\n");
-		goto errdesc;
-	}
-
-	irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
-	return 0;
-
-errdesc:
-	irq_free_descs(virq, 1);
-error:
-	irq_free_virt(virq, 1);
-	return -1;
-}
-
-unsigned int irq_create_direct_mapping(struct irq_host *host)
-{
-	unsigned int virq;
-
-	if (host == NULL)
-		host = irq_default_host;
-
-	BUG_ON(host == NULL);
-	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
-
-	virq = irq_alloc_virt(host, 1, 0);
-	if (virq == NO_IRQ) {
-		pr_debug("irq: create_direct virq allocation failed\n");
-		return NO_IRQ;
-	}
-
-	pr_debug("irq: create_direct obtained virq %d\n", virq);
-
-	if (irq_setup_virq(host, virq, virq))
-		return NO_IRQ;
-
-	return virq;
-}
-
-unsigned int irq_create_mapping(struct irq_host *host,
-				irq_hw_number_t hwirq)
-{
-	unsigned int virq, hint;
-
-	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
-
-	/* Look for default host if nececssary */
-	if (host == NULL)
-		host = irq_default_host;
-	if (host == NULL) {
-		printk(KERN_WARNING "irq_create_mapping called for"
-		       " NULL host, hwirq=%lx\n", hwirq);
-		WARN_ON(1);
-		return NO_IRQ;
-	}
-	pr_debug("irq: -> using host @%p\n", host);
-
-	/* Check if mapping already exists */
-	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ) {
-		pr_debug("irq: -> existing mapping on virq %d\n", virq);
-		return virq;
-	}
-
-	/* Allocate a virtual interrupt number */
-	hint = hwirq % irq_virq_count;
-	virq = irq_alloc_virt(host, 1, hint);
-	if (virq == NO_IRQ) {
-		pr_debug("irq: -> virq allocation failed\n");
-		return NO_IRQ;
-	}
-
-	if (irq_setup_virq(host, virq, hwirq))
-		return NO_IRQ;
-
-	pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
-		 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
-
-	return virq;
-}
-EXPORT_SYMBOL_GPL(irq_create_mapping);
-
-unsigned int irq_create_of_mapping(struct device_node *controller,
-				   const u32 *intspec, unsigned int intsize)
-{
-	struct irq_host *host;
-	irq_hw_number_t hwirq;
-	unsigned int type = IRQ_TYPE_NONE;
-	unsigned int virq;
-
-	if (controller == NULL)
-		host = irq_default_host;
-	else
-		host = irq_find_host(controller);
-	if (host == NULL) {
-		printk(KERN_WARNING "irq: no irq host found for %s !\n",
-		       controller->full_name);
-		return NO_IRQ;
-	}
-
-	/* If host has no translation, then we assume interrupt line */
-	if (host->ops->xlate == NULL)
-		hwirq = intspec[0];
-	else {
-		if (host->ops->xlate(host, controller, intspec, intsize,
-				     &hwirq, &type))
-			return NO_IRQ;
-	}
-
-	/* Create mapping */
-	virq = irq_create_mapping(host, hwirq);
-	if (virq == NO_IRQ)
-		return virq;
-
-	/* Set type if specified and different than the current one */
-	if (type != IRQ_TYPE_NONE &&
-	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
-		irq_set_irq_type(virq, type);
-	return virq;
-}
-EXPORT_SYMBOL_GPL(irq_create_of_mapping);
-
-void irq_dispose_mapping(unsigned int virq)
-{
-	struct irq_host *host;
-	irq_hw_number_t hwirq;
-
-	if (virq == NO_IRQ)
-		return;
-
-	/* Never unmap priority interrupts */
-	if (virq < NR_PRIORITY_IRQS)
-		return;
-
-	host = irq_map[virq].host;
-	if (WARN_ON(host == NULL))
-		return;
-
-	irq_set_status_flags(virq, IRQ_NOREQUEST);
-
-	/* remove chip and handler */
-	irq_set_chip_and_handler(virq, NULL, NULL);
-
-	/* Make sure it's completed */
-	synchronize_irq(virq);
-
-	/* Tell the PIC about it */
-	if (host->ops->unmap)
-		host->ops->unmap(host, virq);
-	smp_mb();
-
-	/* Clear reverse map */
-	hwirq = irq_map[virq].hwirq;
-	switch (host->revmap_type) {
-	case IRQ_HOST_MAP_LINEAR:
-		if (hwirq < host->revmap_data.linear.size)
-			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
-		break;
-	case IRQ_HOST_MAP_TREE:
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
-		break;
-	}
-
-	/* Destroy map */
-	smp_mb();
-	irq_map[virq].hwirq = host->inval_irq;
-
-	irq_free_descs(virq, 1);
-	/* Free it */
-	irq_free_virt(virq, 1);
-}
-EXPORT_SYMBOL_GPL(irq_dispose_mapping);
-
-unsigned int irq_find_mapping(struct irq_host *host,
-			      irq_hw_number_t hwirq)
-{
-	unsigned int i;
-	unsigned int hint = hwirq % irq_virq_count;
-
-	/* Look for default host if nececssary */
-	if (host == NULL)
-		host = irq_default_host;
-	if (host == NULL)
-		return NO_IRQ;
-
-	/* Slow path does a linear search of the map */
-	i = hint;
-	do {
-		if (irq_map[i].host == host &&
-		    irq_map[i].hwirq == hwirq)
-			return i;
-		i++;
-		if (i >= irq_virq_count)
-			i = 4;
-	} while (i != hint);
-	return NO_IRQ;
-}
-EXPORT_SYMBOL_GPL(irq_find_mapping);
-
-unsigned int irq_radix_revmap_lookup(struct irq_host *host,
-				     irq_hw_number_t hwirq)
-{
-	struct irq_map_entry *ptr;
-	unsigned int virq;
-
-	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
-		return irq_find_mapping(host, hwirq);
-
-	/*
-	 * The ptr returned references the static global irq_map.
-	 * but freeing an irq can delete nodes along the path to
-	 * do the lookup via call_rcu.
-	 */
-	rcu_read_lock();
-	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
-	rcu_read_unlock();
-
-	/*
-	 * If found in radix tree, then fine.
-	 * Else fallback to linear lookup - this should not happen in practice
-	 * as it means that we failed to insert the node in the radix tree.
-	 */
-	if (ptr)
-		virq = ptr - irq_map;
-	else
-		virq = irq_find_mapping(host, hwirq);
-
-	return virq;
-}
-
-void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
-			     irq_hw_number_t hwirq)
-{
-	if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
-		return;
-
-	if (virq != NO_IRQ) {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_insert(&host->revmap_data.tree, hwirq,
-				  &irq_map[virq]);
-		mutex_unlock(&revmap_trees_mutex);
-	}
-}
-
-unsigned int irq_linear_revmap(struct irq_host *host,
-			       irq_hw_number_t hwirq)
-{
-	unsigned int *revmap;
-
-	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
-		return irq_find_mapping(host, hwirq);
-
-	/* Check revmap bounds */
-	if (unlikely(hwirq >= host->revmap_data.linear.size))
-		return irq_find_mapping(host, hwirq);
-
-	/* Check if revmap was allocated */
-	revmap = host->revmap_data.linear.revmap;
-	if (unlikely(revmap == NULL))
-		return irq_find_mapping(host, hwirq);
-
-	/* Fill up revmap with slow path if no mapping found */
-	if (unlikely(revmap[hwirq] == NO_IRQ))
-		revmap[hwirq] = irq_find_mapping(host, hwirq);
-
-	return revmap[hwirq];
-}
-
-unsigned int irq_alloc_virt(struct irq_host *host,
-			    unsigned int count,
-			    unsigned int hint)
-{
-	unsigned long flags;
-	unsigned int i, j, found = NO_IRQ;
-
-	if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS))
-		return NO_IRQ;
-
-	raw_spin_lock_irqsave(&irq_big_lock, flags);
-
-	/* Use hint for 1 interrupt if any */
-	if (count == 1 && hint >= NR_PRIORITY_IRQS &&
-	    hint < irq_virq_count && irq_map[hint].host == NULL) {
-		found = hint;
-		goto hint_found;
-	}
-
-	/* Look for count consecutive numbers in the allocatable
-	 * (non-legacy) space
-	 */
-	for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) {
-		if (irq_map[i].host != NULL)
-			j = 0;
-		else
-			j++;
-
-		if (j == count) {
-			found = i - count + 1;
-			break;
-		}
-	}
-	if (found == NO_IRQ) {
-		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-		return NO_IRQ;
-	}
-hint_found:
-	for (i = found; i < (found + count); i++) {
-		irq_map[i].hwirq = host->inval_irq;
-		smp_wmb();
-		irq_map[i].host = host;
-	}
-	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-	return found;
-}
-
-void irq_free_virt(unsigned int virq, unsigned int count)
-{
-	unsigned long flags;
-	unsigned int i;
-
-	WARN_ON(virq < NR_PRIORITY_IRQS);
-	WARN_ON(count == 0 || (virq + count) > irq_virq_count);
-
-	if (virq < NR_PRIORITY_IRQS) {
-		if (virq + count < NR_PRIORITY_IRQS)
-			return;
-		count -= NR_PRIORITY_IRQS - virq;
-		virq = NR_PRIORITY_IRQS;
-	}
-
-	if (count > irq_virq_count || virq > irq_virq_count - count) {
-		if (virq > irq_virq_count)
-			return;
-		count = irq_virq_count - virq;
-	}
-
-	raw_spin_lock_irqsave(&irq_big_lock, flags);
-	for (i = virq; i < (virq + count); i++) {
-		struct irq_host *host;
-
-		host = irq_map[i].host;
-		irq_map[i].hwirq = host->inval_irq;
-		smp_wmb();
-		irq_map[i].host = NULL;
-	}
-	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-}
-
-#ifdef CONFIG_VIRQ_DEBUG
-static int virq_debug_show(struct seq_file *m, void *private)
-{
-	unsigned long flags;
-	struct irq_desc *desc;
-	const char *p;
-	static const char none[] = "none";
-	void *data;
-	int i;
-
-	seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
-		   "chip name", "chip data", "host name");
-
-	for (i = 1; i < nr_irqs; i++) {
-		desc = irq_to_desc(i);
-		if (!desc)
-			continue;
-
-		raw_spin_lock_irqsave(&desc->lock, flags);
-
-		if (desc->action && desc->action->handler) {
-			struct irq_chip *chip;
-
-			seq_printf(m, "%5d ", i);
-			seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
-
-			chip = irq_desc_get_chip(desc);
-			if (chip && chip->name)
-				p = chip->name;
-			else
-				p = none;
-			seq_printf(m, "%-15s ", p);
-
-			data = irq_desc_get_chip_data(desc);
-			seq_printf(m, "0x%16p ", data);
-
-			if (irq_map[i].host && irq_map[i].host->of_node)
-				p = irq_map[i].host->of_node->full_name;
-			else
-				p = none;
-			seq_printf(m, "%s\n", p);
-		}
-
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	}
-
-	return 0;
-}
-
-static int virq_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, virq_debug_show, inode->i_private);
-}
-
-static const struct file_operations virq_debug_fops = {
-	.open = virq_debug_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int __init irq_debugfs_init(void)
-{
-	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
-				NULL, &virq_debug_fops) == NULL)
-		return -ENOMEM;
-
-	return 0;
-}
-device_initcall(irq_debugfs_init);
-#endif /* CONFIG_VIRQ_DEBUG */
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index 7c37a947fb1c..61f5863f1b1d 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -48,7 +48,7 @@ struct megamod_regs {
 };
 
 struct megamod_pic {
-	struct irq_host *irqhost;
+	struct irq_domain *irqhost;
 	struct megamod_regs __iomem *regs;
 	raw_spinlock_t lock;
 
@@ -116,7 +116,7 @@ static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
 	}
 }
 
-static int megamod_map(struct irq_host *h, unsigned int virq,
+static int megamod_map(struct irq_domain *h, unsigned int virq,
 		       irq_hw_number_t hw)
 {
 	struct megamod_pic *pic = h->host_data;
@@ -136,7 +136,7 @@ static int megamod_map(struct irq_host *h, unsigned int virq,
 	return 0;
 }
 
-static int megamod_xlate(struct irq_host *h, struct device_node *ct,
+static int megamod_xlate(struct irq_domain *h, struct device_node *ct,
 			 const u32 *intspec, unsigned int intsize,
 			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
 
@@ -148,7 +148,7 @@ static int megamod_xlate(struct irq_host *h, struct device_node *ct,
 	return 0;
 }
 
-static struct irq_host_ops megamod_host_ops = {
+static struct irq_domain_ops megamod_domain_ops = {
 	.map = megamod_map,
 	.xlate = megamod_xlate,
 };
@@ -223,9 +223,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
 		return NULL;
 	}
 
-	pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
-				      NR_COMBINERS * 32, &megamod_host_ops,
-				      IRQ_UNMAPPED);
+	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
+					     &megamod_domain_ops, pic);
 	if (!pic->irqhost) {
 		pr_err("%s: Could not alloc host.\n", np->full_name);
 		goto error_free;