author    Thomas Gleixner <tglx@linutronix.de>    2017-08-18 05:22:27 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2017-08-18 05:22:27 -0400
commit    6629695465ee6eb9f4afab74f1531a89692a136e (patch)
tree      05b2ea052bdec2051beb0102e0c43d8046c6baa2
parent    500912121411e0175d44b69a7810ac6068e78326 (diff)
parent    495c38d3001fd226cf91df1d031320f349bcaf35 (diff)
Merge branch 'irq/for-gpio' into irq/core
Merge the flow handlers and irq domain extensions which are in a separate branch so they can be consumed by the gpio folks.
-rw-r--r--   include/linux/irq.h         2
-rw-r--r--   include/linux/irqdomain.h   3
-rw-r--r--   kernel/irq/Kconfig          4
-rw-r--r--   kernel/irq/chip.c         109
-rw-r--r--   kernel/irq/irqdomain.c    230
5 files changed, 318 insertions(+), 30 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index dcfac6c8ba18..b99a784635ff 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
 extern int irq_chip_pm_get(struct irq_data *data);
 extern int irq_chip_pm_put(struct irq_data *data);
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
+extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
 extern void irq_chip_enable_parent(struct irq_data *data);
 extern void irq_chip_disable_parent(struct irq_data *data);
 extern void irq_chip_ack_parent(struct irq_data *data);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index cac77a5c5555..2318f29054af 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -460,6 +460,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain,
 extern void irq_domain_free_irqs_top(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs);
 
+extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
 extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
                                         unsigned int irq_base,
                                         unsigned int nr_irqs, void *arg);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 1d06af787932..a117adf7084b 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -73,6 +73,10 @@ config IRQ_DOMAIN_HIERARCHY
 	bool
 	select IRQ_DOMAIN
 
+# Support for hierarchical fasteoi+edge and fasteoi+level handlers
+config IRQ_FASTEOI_HIERARCHY_HANDLERS
+	bool
+
 # Generic IRQ IPI support
 config GENERIC_IRQ_IPI
 	bool
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a3cc37c0c85e..23958980189d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1092,6 +1092,112 @@ void irq_cpu_offline(void)
 }
 
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+
+#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
+/**
+ * handle_fasteoi_ack_irq - irq handler for edge hierarchy
+ * stacked on transparent controllers
+ *
+ * @desc: the interrupt description structure for this irq
+ *
+ * Like handle_fasteoi_irq(), but for use with hierarchy where
+ * the irq_chip also needs to have its ->irq_ack() function
+ * called.
+ */
+void handle_fasteoi_ack_irq(struct irq_desc *desc)
+{
+	struct irq_chip *chip = desc->irq_data.chip;
+
+	raw_spin_lock(&desc->lock);
+
+	if (!irq_may_run(desc))
+		goto out;
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	/*
+	 * If its disabled or no action available
+	 * then mask it and get out of here:
+	 */
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		mask_irq(desc);
+		goto out;
+	}
+
+	kstat_incr_irqs_this_cpu(desc);
+	if (desc->istate & IRQS_ONESHOT)
+		mask_irq(desc);
+
+	/* Start handling the irq */
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+	preflow_handler(desc);
+	handle_irq_event(desc);
+
+	cond_unmask_eoi_irq(desc, chip);
+
+	raw_spin_unlock(&desc->lock);
+	return;
+out:
+	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
+		chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
+
+/**
+ * handle_fasteoi_mask_irq - irq handler for level hierarchy
+ * stacked on transparent controllers
+ *
+ * @desc: the interrupt description structure for this irq
+ *
+ * Like handle_fasteoi_irq(), but for use with hierarchy where
+ * the irq_chip also needs to have its ->irq_mask_ack() function
+ * called.
+ */
+void handle_fasteoi_mask_irq(struct irq_desc *desc)
+{
+	struct irq_chip *chip = desc->irq_data.chip;
+
+	raw_spin_lock(&desc->lock);
+	mask_ack_irq(desc);
+
+	if (!irq_may_run(desc))
+		goto out;
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	/*
+	 * If its disabled or no action available
+	 * then mask it and get out of here:
+	 */
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		mask_irq(desc);
+		goto out;
+	}
+
+	kstat_incr_irqs_this_cpu(desc);
+	if (desc->istate & IRQS_ONESHOT)
+		mask_irq(desc);
+
+	preflow_handler(desc);
+	handle_irq_event(desc);
+
+	cond_unmask_eoi_irq(desc, chip);
+
+	raw_spin_unlock(&desc->lock);
+	return;
+out:
+	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
+		chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
+
+#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
+
 /**
  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
  * NULL)
@@ -1105,6 +1211,7 @@ void irq_chip_enable_parent(struct irq_data *data)
 	else
 		data->chip->irq_unmask(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
 
 /**
  * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
@@ -1119,6 +1226,7 @@ void irq_chip_disable_parent(struct irq_data *data)
 	else
 		data->chip->irq_mask(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
 
 /**
  * irq_chip_ack_parent - Acknowledge the parent interrupt
@@ -1181,6 +1289,7 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
 
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
 
 /**
  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
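For context on how the two new flow handlers above are meant to be wired up: below is a minimal sketch, assuming a hypothetical "my_gpio" child irqchip stacked on a transparent parent controller. The hierarchical domain's ->alloc() callback selects handle_fasteoi_ack_irq() for edge triggers (the child chip's ->irq_ack() gets called) and handle_fasteoi_mask_irq() for level triggers (the child chip gets mask/ack treatment). None of the my_gpio_* names come from this patch; only the flow handlers and the irq_chip_*_parent helpers are from the kernel.

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical child chip; real drivers forward most ops to the parent. */
static struct irq_chip my_gpio_irq_chip = {
        .name         = "my-gpio",
        .irq_ack      = irq_chip_ack_parent,
        .irq_mask     = irq_chip_mask_parent,
        .irq_unmask   = irq_chip_unmask_parent,
        .irq_eoi      = irq_chip_eoi_parent,
        .irq_set_type = irq_chip_set_type_parent,
};

static int my_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        struct irq_fwspec *fwspec = arg;   /* param layout is device specific */
        irq_hw_number_t hwirq = fwspec->param[0];
        unsigned int type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
        struct irq_fwspec parent_fwspec;

        /* Sketch assumes one interrupt per allocation (nr_irqs == 1). */

        /* Edge interrupts need ->irq_ack(), level interrupts mask/ack. */
        if (type & IRQ_TYPE_EDGE_BOTH)
                irq_domain_set_info(domain, virq, hwirq, &my_gpio_irq_chip,
                                    NULL, handle_fasteoi_ack_irq, NULL, NULL);
        else
                irq_domain_set_info(domain, virq, hwirq, &my_gpio_irq_chip,
                                    NULL, handle_fasteoi_mask_irq, NULL, NULL);

        /* Forward the allocation to the transparent parent domain. */
        parent_fwspec = *fwspec;
        parent_fwspec.fwnode = domain->parent->fwnode;
        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
                                            &parent_fwspec);
}

A driver using these handlers also has to select IRQ_FASTEOI_HIERARCHY_HANDLERS, the Kconfig symbol added earlier in this series.
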
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index f1f251479aa6..1ff9912211e9 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
+static void irq_domain_clear_mapping(struct irq_domain *domain,
+				     irq_hw_number_t hwirq)
+{
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = 0;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
+static void irq_domain_set_mapping(struct irq_domain *domain,
+				   irq_hw_number_t hwirq,
+				   struct irq_data *irq_data)
+{
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = irq_data->irq;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
 void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(irq);
@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 	domain->mapcount--;
 
 	/* Clear reverse map for this hwirq */
-	if (hwirq < domain->revmap_size) {
-		domain->linear_revmap[hwirq] = 0;
-	} else {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_delete(&domain->revmap_tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
-	}
+	irq_domain_clear_mapping(domain, hwirq);
 }
 
 int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 	}
 
 	domain->mapcount++;
-	if (hwirq < domain->revmap_size) {
-		domain->linear_revmap[hwirq] = virq;
-	} else {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
-	}
+	irq_domain_set_mapping(domain, hwirq, irq_data);
 	mutex_unlock(&irq_domain_mutex);
 
 	irq_clear_status_flags(virq, IRQ_NOREQUEST);
@@ -1138,16 +1151,9 @@ static void irq_domain_insert_irq(int virq)
 
 	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
 		struct irq_domain *domain = data->domain;
-		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount++;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = virq;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_tree, hwirq, data);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_set_mapping(domain, data->hwirq, data);
 
 		/* If not already assigned, give the domain the chip's name */
 		if (!domain->name && data->chip)
@@ -1171,13 +1177,7 @@ static void irq_domain_remove_irq(int virq)
 		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount--;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = 0;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_tree, hwirq);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_clear_mapping(domain, hwirq);
 	}
 }
 
@@ -1362,7 +1362,8 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
 					   unsigned int nr_irqs)
 {
-	domain->ops->free(domain, irq_base, nr_irqs);
+	if (domain->ops->free)
+		domain->ops->free(domain, irq_base, nr_irqs);
 }
 
 int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
@@ -1448,6 +1449,175 @@ out_free_desc:
 	return ret;
 }
 
+/* The irq_data was moved, fix the revmap to refer to the new location */
+static void irq_domain_fix_revmap(struct irq_data *d)
+{
+	void **slot;
+
+	if (d->hwirq < d->domain->revmap_size)
+		return; /* Not using radix tree. */
+
+	/* Fix up the revmap. */
+	mutex_lock(&revmap_trees_mutex);
+	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
+	if (slot)
+		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
+	mutex_unlock(&revmap_trees_mutex);
+}
+
+/**
+ * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
+ * @domain:	Domain to push.
+ * @virq:	Irq to push the domain in to.
+ * @arg:	Passed to the irq_domain_ops alloc() function.
+ *
+ * For an already existing irqdomain hierarchy, as might be obtained
+ * via a call to pci_enable_msix(), add an additional domain to the
+ * head of the processing chain.  Must be called before request_irq()
+ * has been called.
+ */
+int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
+{
+	struct irq_data *child_irq_data;
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_desc *desc;
+	int rv = 0;
+
+	/*
+	 * Check that no action has been set, which indicates the virq
+	 * is in a state where this function doesn't have to deal with
+	 * races between interrupt handling and maintaining the
+	 * hierarchy.  This will catch gross misuse.  Attempting to
+	 * make the check race free would require holding locks across
+	 * calls to struct irq_domain_ops->alloc(), which could lead
+	 * to deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
+		return -EINVAL;
+
+	if (domain->parent != root_irq_data->domain)
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
+				      irq_data_get_node(root_irq_data));
+	if (!child_irq_data)
+		return -ENOMEM;
+
+	mutex_lock(&irq_domain_mutex);
+
+	/* Copy the original irq_data. */
+	*child_irq_data = *root_irq_data;
+
+	/*
+	 * Overwrite the root_irq_data, which is embedded in struct
+	 * irq_desc, with values for this domain.
+	 */
+	root_irq_data->parent_data = child_irq_data;
+	root_irq_data->domain = domain;
+	root_irq_data->mask = 0;
+	root_irq_data->hwirq = 0;
+	root_irq_data->chip = NULL;
+	root_irq_data->chip_data = NULL;
+
+	/* May (probably does) set hwirq, chip, etc. */
+	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
+	if (rv) {
+		/* Restore the original irq_data. */
+		*root_irq_data = *child_irq_data;
+		goto error;
+	}
+
+	irq_domain_fix_revmap(child_irq_data);
+	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
+
+error:
+	mutex_unlock(&irq_domain_mutex);
+
+	return rv;
+}
+EXPORT_SYMBOL_GPL(irq_domain_push_irq);
+
+/**
+ * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
+ * @domain:	Domain to remove.
+ * @virq:	Irq to remove the domain from.
+ *
+ * Undo the effects of a call to irq_domain_push_irq().  Must be
+ * called either before request_irq() or after free_irq().
+ */
+int irq_domain_pop_irq(struct irq_domain *domain, int virq)
+{
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_data *child_irq_data;
+	struct irq_data *tmp_irq_data;
+	struct irq_desc *desc;
+
+	/*
+	 * Check that no action is set, which indicates the virq is in
+	 * a state where this function doesn't have to deal with races
+	 * between interrupt handling and maintaining the hierarchy.
+	 * This will catch gross misuse.  Attempting to make the check
+	 * race free would require holding locks across calls to
+	 * struct irq_domain_ops->free(), which could lead to
+	 * deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	tmp_irq_data = irq_domain_get_irq_data(domain, virq);
+
+	/* We can only "pop" if this domain is at the top of the list */
+	if (WARN_ON(root_irq_data != tmp_irq_data))
+		return -EINVAL;
+
+	if (WARN_ON(root_irq_data->domain != domain))
+		return -EINVAL;
+
+	child_irq_data = root_irq_data->parent_data;
+	if (WARN_ON(!child_irq_data))
+		return -EINVAL;
+
+	mutex_lock(&irq_domain_mutex);
+
+	root_irq_data->parent_data = NULL;
+
+	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
+	irq_domain_free_irqs_hierarchy(domain, virq, 1);
+
+	/* Restore the original irq_data. */
+	*root_irq_data = *child_irq_data;
+
+	irq_domain_fix_revmap(root_irq_data);
+
+	mutex_unlock(&irq_domain_mutex);
+
+	kfree(child_irq_data);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
+
 /**
  * irq_domain_free_irqs - Free IRQ number and associated data structures
  * @virq:	base IRQ number
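As a usage note for the push/pop API added above, here is a minimal sketch with hypothetical my_dev_* names (nothing below is part of the patch): the extra domain is pushed onto an interrupt that the parent has already allocated (for example an MSI vector) before request_irq(), and popped again only after free_irq(), which is exactly what the WARN_ON(desc->action) checks in both functions enforce.

#include <linux/interrupt.h>
#include <linux/irqdomain.h>

/* Hypothetical device that stacks its own translation domain on top of an
 * interrupt already provided by the parent hierarchy. */
struct my_dev {
        struct irq_domain *domain;
};

static irqreturn_t my_dev_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct my_dev *dev, int virq)
{
        int ret;

        /* Insert dev->domain at the head of virq's hierarchy; the last
         * argument is handed to the domain's ->alloc() callback. */
        ret = irq_domain_push_irq(dev->domain, virq, dev);
        if (ret)
                return ret;

        /* Only now may an action be installed on the interrupt. */
        ret = request_irq(virq, my_dev_handler, 0, "my-dev", dev);
        if (ret)
                irq_domain_pop_irq(dev->domain, virq);
        return ret;
}

static void my_dev_teardown_irq(struct my_dev *dev, int virq)
{
        free_irq(virq, dev);                    /* remove the action first ... */
        irq_domain_pop_irq(dev->domain, virq);  /* ... then unstack the domain */
}

The ordering matters because both helpers refuse to run while desc->action is set; making them safe against a concurrent handler would require holding locks across the domain's ->alloc()/->free() callbacks, which the patch deliberately avoids.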