Diffstat (limited to 'kernel/irq/irqdesc.c')

 -rw-r--r--  kernel/irq/irqdesc.c | 224
 1 file changed, 206 insertions(+), 18 deletions(-)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a623b44f2d4b..00bb0aeea1d0 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/bitmap.h>
 #include <linux/irqdomain.h>
+#include <linux/sysfs.h>
 
 #include "internals.h"
 
@@ -123,6 +124,181 @@ static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
+static void irq_kobj_release(struct kobject *kobj);
+
+#ifdef CONFIG_SYSFS
+static struct kobject *irq_kobj_base;
+
+#define IRQ_ATTR_RO(_name) \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+static ssize_t per_cpu_count_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	int cpu, irq = desc->irq_data.irq;
+	ssize_t ret = 0;
+	char *p = "";
+
+	for_each_possible_cpu(cpu) {
+		unsigned int c = kstat_irqs_cpu(irq, cpu);
+
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
+		p = ",";
+	}
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	return ret;
+}
+IRQ_ATTR_RO(per_cpu_count);
+
+static ssize_t chip_name_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->irq_data.chip && desc->irq_data.chip->name) {
+		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
+				desc->irq_data.chip->name);
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(chip_name);
+
+static ssize_t hwirq_show(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->irq_data.domain)
+		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(hwirq);
+
+static ssize_t type_show(struct kobject *kobj,
+			 struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	ret = sprintf(buf, "%s\n",
+		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+
+}
+IRQ_ATTR_RO(type);
+
+static ssize_t name_show(struct kobject *kobj,
+			 struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	ssize_t ret = 0;
+
+	raw_spin_lock_irq(&desc->lock);
+	if (desc->name)
+		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
+	raw_spin_unlock_irq(&desc->lock);
+
+	return ret;
+}
+IRQ_ATTR_RO(name);
+
+static ssize_t actions_show(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+	struct irqaction *action;
+	ssize_t ret = 0;
+	char *p = "";
+
+	raw_spin_lock_irq(&desc->lock);
+	for (action = desc->action; action != NULL; action = action->next) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
+				 p, action->name);
+		p = ",";
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	if (ret)
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+	return ret;
+}
+IRQ_ATTR_RO(actions);
+
+static struct attribute *irq_attrs[] = {
+	&per_cpu_count_attr.attr,
+	&chip_name_attr.attr,
+	&hwirq_attr.attr,
+	&type_attr.attr,
+	&name_attr.attr,
+	&actions_attr.attr,
+	NULL
+};
+
+static struct kobj_type irq_kobj_type = {
+	.release	= irq_kobj_release,
+	.sysfs_ops	= &kobj_sysfs_ops,
+	.default_attrs	= irq_attrs,
+};
+
+static void irq_sysfs_add(int irq, struct irq_desc *desc)
+{
+	if (irq_kobj_base) {
+		/*
+		 * Continue even in case of failure as this is nothing
+		 * crucial.
+		 */
+		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
+			pr_warn("Failed to add kobject for irq %d\n", irq);
+	}
+}
+
+static int __init irq_sysfs_init(void)
+{
+	struct irq_desc *desc;
+	int irq;
+
+	/* Prevent concurrent irq alloc/free */
+	irq_lock_sparse();
+
+	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
+	if (!irq_kobj_base) {
+		irq_unlock_sparse();
+		return -ENOMEM;
+	}
+
+	/* Add the already allocated interrupts */
+	for_each_irq_desc(irq, desc)
+		irq_sysfs_add(irq, desc);
+	irq_unlock_sparse();
+
+	return 0;
+}
+postcore_initcall(irq_sysfs_init);
+
+#else /* !CONFIG_SYSFS */
+
+static struct kobj_type irq_kobj_type = {
+	.release	= irq_kobj_release,
+};
+
+static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+
+#endif /* CONFIG_SYSFS */
+
 static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
 
 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
@@ -187,6 +363,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
+	kobject_init(&desc->kobj, &irq_kobj_type);
 
 	return desc;
 
@@ -197,15 +374,22 @@ err_desc:
 	return NULL;
 }
 
-static void delayed_free_desc(struct rcu_head *rhp)
+static void irq_kobj_release(struct kobject *kobj)
 {
-	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
 
 	free_masks(desc);
 	free_percpu(desc->kstat_irqs);
 	kfree(desc);
 }
 
+static void delayed_free_desc(struct rcu_head *rhp)
+{
+	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+
+	kobject_put(&desc->kobj);
+}
+
 static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -217,8 +401,12 @@ static void free_desc(unsigned int irq)
 	 * kstat_irq_usr(). Once we deleted the descriptor from the
 	 * sparse tree we can free it. Access in proc will fail to
 	 * lookup the descriptor.
+	 *
+	 * The sysfs entry must be serialized against a concurrent
+	 * irq_sysfs_init() as well.
 	 */
 	mutex_lock(&sparse_irq_lock);
+	kobject_del(&desc->kobj);
 	delete_irq_desc(irq);
 	mutex_unlock(&sparse_irq_lock);
 
@@ -236,31 +424,31 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 	const struct cpumask *mask = NULL;
 	struct irq_desc *desc;
 	unsigned int flags;
-	int i, cpu = -1;
+	int i;
 
-	if (affinity && cpumask_empty(affinity))
-		return -EINVAL;
+	/* Validate affinity mask(s) */
+	if (affinity) {
+		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
+			if (cpumask_empty(mask))
+				return -EINVAL;
+		}
+	}
 
 	flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
+	mask = NULL;
 
 	for (i = 0; i < cnt; i++) {
 		if (affinity) {
-			cpu = cpumask_next(cpu, affinity);
-			if (cpu >= nr_cpu_ids)
-				cpu = cpumask_first(affinity);
-			node = cpu_to_node(cpu);
-
-			/*
-			 * For single allocations we use the caller provided
-			 * mask otherwise we use the mask of the target cpu
-			 */
-			mask = cnt == 1 ? affinity : cpumask_of(cpu);
+			node = cpu_to_node(cpumask_first(affinity));
+			mask = affinity;
+			affinity++;
 		}
 		desc = alloc_desc(start + i, node, flags, mask, owner);
 		if (!desc)
 			goto err;
 		mutex_lock(&sparse_irq_lock);
 		irq_insert_desc(start + i, desc);
+		irq_sysfs_add(start + i, desc);
 		mutex_unlock(&sparse_irq_lock);
 	}
 	return start;
@@ -481,9 +669,9 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
  * @cnt:	Number of consecutive irqs to allocate.
  * @node:	Preferred node on which the irq descriptor should be allocated
  * @owner:	Owning module (can be NULL)
- * @affinity:	Optional pointer to an affinity mask which hints where the
- *		irq descriptors should be allocated and which default
- *		affinities to use
+ * @affinity:	Optional pointer to an affinity mask array of size @cnt which
+ *		hints where the irq descriptors should be allocated and which
+ *		default affinities to use
  *
  * Returns the first irq number or error code
  */
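
With this applied, each allocated descriptor shows up under /sys/kernel/irq/<irq>/ with the per_cpu_count, chip_name, hwirq, type, name and actions attributes defined above, and free_desc() tears the entry down again with kobject_del() before the RCU-delayed kobject_put() releases the descriptor. The alloc_descs() rework also changes the contract of __irq_alloc_descs(): @affinity is now an array of @cnt masks, one per descriptor, and an empty mask fails the whole allocation with -EINVAL. A minimal caller-side sketch of that contract follows; the example_* function and its CPU-spreading policy are hypothetical, not part of the patch:

/* Hypothetical caller illustrating the per-descriptor affinity array. */
#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/slab.h>

static int example_alloc_irq_range(unsigned int cnt)
{
	struct cpumask *masks;
	unsigned int i;
	int virq;

	/* One mask per descriptor; an empty mask is rejected with -EINVAL. */
	masks = kcalloc(cnt, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	/*
	 * Spread the interrupts across CPU ids; a real caller would pick
	 * meaningful targets, e.g. one queue per CPU.
	 */
	for (i = 0; i < cnt; i++)
		cpumask_copy(&masks[i], cpumask_of(i % nr_cpu_ids));

	/*
	 * irq = -1 lets the core pick a free range. Because an affinity
	 * array is supplied, each descriptor is allocated on the node of
	 * the first CPU in its mask and marked IRQD_AFFINITY_MANAGED.
	 */
	virq = __irq_alloc_descs(-1, 0, cnt, NUMA_NO_NODE, THIS_MODULE, masks);

	kfree(masks);
	return virq;
}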