Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c  92
1 file changed, 62 insertions, 30 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 704e488730a5..e1497481fe8a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -138,6 +138,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	return 0;
 }
 
+int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	if (!desc)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc->affinity_hint = m;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
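
(Illustration, not part of the patch.) A driver that knows which CPU will do the work for an interrupt can publish that preference through the new irq_set_affinity_hint() call; the core only stores the mask pointer under desc->lock, so the mask must stay valid until the hint is cleared. A minimal sketch of how a driver might install a hint, using a hypothetical example_publish_hint() helper:

/* Hypothetical driver fragment illustrating the new API. */
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int example_publish_hint(unsigned int irq, int cpu)
{
	/*
	 * cpumask_of(cpu) points at a static mask, so it remains valid
	 * for as long as the hint is installed.  Setting the hint does
	 * not change the actual affinity; it only records a preference
	 * that tools such as irqbalance may consult.
	 */
	return irq_set_affinity_hint(irq, cpumask_of(cpu));
}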
@@ -440,6 +456,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
 		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
 		desc->status |= flags;
+
+		if (chip != desc->chip)
+			irq_chip_set_defaults(desc->chip);
 	}
 
 	return ret;
@@ -757,16 +776,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (new->flags & IRQF_ONESHOT)
 		desc->status |= IRQ_ONESHOT;
 
-	/*
-	 * Force MSI interrupts to run with interrupts
-	 * disabled. The multi vector cards can cause stack
-	 * overflows due to nested interrupts when enough of
-	 * them are directed to a core and fire at the same
-	 * time.
-	 */
-	if (desc->msi_desc)
-		new->flags |= IRQF_DISABLED;
-
 	if (!(desc->status & IRQ_NOAUTOEN)) {
 		desc->depth = 0;
 		desc->status &= ~IRQ_DISABLED;
@@ -916,6 +925,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		desc->chip->disable(irq);
 	}
 
+#ifdef CONFIG_SMP
+	/* make sure affinity_hint is cleaned up */
+	if (WARN_ON_ONCE(desc->affinity_hint))
+		desc->affinity_hint = NULL;
+#endif
+
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
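
(Illustration, not part of the patch.) Because __free_irq() now warns if an affinity_hint is still installed when an action is torn down, a driver that sets a hint should clear it before releasing the line. A short teardown sketch, with example_teardown() as a hypothetical helper:

/* Hypothetical teardown fragment. */
#include <linux/interrupt.h>

static void example_teardown(unsigned int irq, void *dev_id)
{
	/* Drop the hint first so the WARN_ON_ONCE() added above in
	 * __free_irq() does not trip over a stale pointer. */
	irq_set_affinity_hint(irq, NULL);
	free_irq(irq, dev_id);
}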
@@ -1027,7 +1042,6 @@ EXPORT_SYMBOL(free_irq);
  *	Flags:
  *
  *	IRQF_SHARED		Interrupt is shared
- *	IRQF_DISABLED		Disable local interrupts while processing
  *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
  *	IRQF_TRIGGER_*		Specify active edge(s) or level
  *
@@ -1041,25 +1055,6 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	int retval;
 
 	/*
-	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
-	 * the _first_ irqaction (sigh).  That can cause oopsing, but
-	 * the behavior is classified as "will not fix" so we need to
-	 * start nudging drivers away from using that idiom.
-	 */
-	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
-					(IRQF_SHARED|IRQF_DISABLED)) {
-		pr_warning(
-		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
-			irq, devname);
-	}
-
-#ifdef CONFIG_LOCKDEP
-	/*
-	 * Lockdep wants atomic interrupt handlers:
-	 */
-	irqflags |= IRQF_DISABLED;
-#endif
-	/*
 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
 	 * otherwise we'll have trouble later trying to figure out
 	 * which interrupt is which (messes up the interrupt freeing
@@ -1120,3 +1115,40 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	return retval;
 }
 EXPORT_SYMBOL(request_threaded_irq);
+
+/**
+ *	request_any_context_irq - allocate an interrupt line
+ *	@irq: Interrupt line to allocate
+ *	@handler: Function to be called when the IRQ occurs.
+ *		  Threaded handler for threaded interrupts.
+ *	@flags: Interrupt type flags
+ *	@name: An ascii name for the claiming device
+ *	@dev_id: A cookie passed back to the handler function
+ *
+ *	This call allocates interrupt resources and enables the
+ *	interrupt line and IRQ handling. It selects either a
+ *	hardirq or threaded handling method depending on the
+ *	context.
+ *
+ *	On failure, it returns a negative value. On success,
+ *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
+ */
+int request_any_context_irq(unsigned int irq, irq_handler_t handler,
+			    unsigned long flags, const char *name, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	if (desc->status & IRQ_NESTED_THREAD) {
+		ret = request_threaded_irq(irq, NULL, handler,
+					   flags, name, dev_id);
+		return !ret ? IRQC_IS_NESTED : ret;
+	}
+
+	ret = request_irq(irq, handler, flags, name, dev_id);
+	return !ret ? IRQC_IS_HARDIRQ : ret;
+}
+EXPORT_SYMBOL_GPL(request_any_context_irq);
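
(Illustration, not part of the patch.) request_any_context_irq() lets a driver request a line without knowing whether the underlying irq_chip requires nested-thread handling (for example a line demultiplexed by an expander on a sleeping bus); the return value reports which method was selected. A minimal usage sketch, with my_handler and my_request as hypothetical names:

/* Hypothetical consumer of request_any_context_irq(). */
#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/*
	 * May run in hard interrupt context (IRQC_IS_HARDIRQ) or in a
	 * thread (IRQC_IS_NESTED), so it must not rely on either.
	 */
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *dev_id)
{
	int ret = request_any_context_irq(irq, my_handler, 0,
					  "my_device", dev_id);

	if (ret < 0)
		return ret;

	/* On success, ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */
	return 0;
}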