author    Jiri Kosina <jkosina@suse.cz>  2011-04-26 04:22:15 -0400
committer Jiri Kosina <jkosina@suse.cz>  2011-04-26 04:22:59 -0400
commit    07f9479a40cc778bc1462ada11f95b01360ae4ff
tree      0676cf38df3844004bb3ebfd99dfa67a4a8998f5  /kernel/irq/manage.c
parent    9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf
parent    cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 103
1 file changed, 53 insertions(+), 50 deletions(-)
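The changes this merge brings into kernel/irq/manage.c are the tail end of the genirq overhaul queued around 2.6.39: the IRQS_INPROGRESS, IRQS_MASKED and IRQS_DISABLED bits move out of desc->istate into the per-irq_data state word queried through irqd_*() accessors, irq_set_affinity() is split into a locked core plus a locking wrapper, and the transitional irq_compat_*() and irq_chip_set_defaults() helpers disappear. For readers new to the accessor style, here is a minimal sketch of the pattern; the names mirror include/linux/irq.h of that era, but the layout shown is a simplified assumption, not a copy of the real header.

/* Illustrative only: the real definitions live in include/linux/irq.h;
 * the bit values here are assumptions for the sketch. */
#include <stdbool.h>

#define IRQD_IRQ_DISABLED	(1u << 16)
#define IRQD_IRQ_MASKED		(1u << 17)
#define IRQD_IRQ_INPROGRESS	(1u << 18)

struct irq_data {
	unsigned int state_use_accessors;	/* never poked directly */
};

static inline bool irqd_irq_disabled(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_DISABLED;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
}

Keeping the state behind accessors means call sites such as the ones below never depend on which word the bit lives in, which is exactly what lets this patch relocate the bits without touching the callers' logic.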
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a2aa73e536c..07c1611f3899 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -41,7 +41,7 @@ early_param("threadirqs", setup_forced_irqthreads);
 void synchronize_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned int state;
+	bool inprogress;
 
 	if (!desc)
 		return;
@@ -53,16 +53,16 @@ void synchronize_irq(unsigned int irq)
 		 * Wait until we're out of the critical section. This might
 		 * give the wrong answer due to the lack of memory barriers.
 		 */
-		while (desc->istate & IRQS_INPROGRESS)
+		while (irqd_irq_inprogress(&desc->irq_data))
 			cpu_relax();
 
 		/* Ok, that indicated we're done: double-check carefully. */
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		state = desc->istate;
+		inprogress = irqd_irq_inprogress(&desc->irq_data);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 		/* Oops, that failed? */
-	} while (state & IRQS_INPROGRESS);
+	} while (inprogress);
 
 	/*
 	 * We made sure that no hardirq handler is running. Now verify
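Worth noting in this hunk: the busy-wait polls irqd_irq_inprogress() without the descriptor lock, which is cheap but can return a stale answer, so the do/while body then takes desc->lock and re-reads the state before trusting it. A generic userspace sketch of the same spin-then-recheck idiom, using C11 atomics in place of the spinlock (illustrative, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool in_progress;

static void wait_until_idle(void)
{
	bool busy;

	do {
		/* Cheap, lockless poll; may briefly see stale state. */
		while (atomic_load_explicit(&in_progress,
					    memory_order_relaxed))
			;	/* cpu_relax() equivalent would go here */

		/* Authoritative recheck; the kernel uses desc->lock
		 * here instead of an acquire load. */
		busy = atomic_load_explicit(&in_progress,
					    memory_order_acquire);
	} while (busy);
}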
@@ -112,13 +112,13 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 }
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
 {
-	return irq_settings_can_move_pcntxt(desc);
+	return irqd_can_move_in_process_context(data);
 }
-static inline bool irq_move_pending(struct irq_desc *desc)
+static inline bool irq_move_pending(struct irq_data *data)
 {
-	return irqd_is_setaffinity_pending(&desc->irq_data);
+	return irqd_is_setaffinity_pending(data);
 }
 static inline void
 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
@@ -131,43 +131,34 @@ irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
 	cpumask_copy(mask, desc->pending_mask);
 }
 #else
-static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
-static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
+static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
+static inline bool irq_move_pending(struct irq_data *data) { return false; }
 static inline void
 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
-/**
- *	irq_set_affinity - Set the irq affinity of a given irq
- *	@irq:		Interrupt to set affinity
- *	@cpumask:	cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_chip *chip = desc->irq_data.chip;
-	unsigned long flags;
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	struct irq_desc *desc = irq_data_to_desc(data);
 	int ret = 0;
 
-	if (!chip->irq_set_affinity)
+	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&desc->lock, flags);
-
-	if (irq_can_move_pcntxt(desc)) {
-		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+	if (irq_can_move_pcntxt(data)) {
+		ret = chip->irq_set_affinity(data, mask, false);
 		switch (ret) {
 		case IRQ_SET_MASK_OK:
-			cpumask_copy(desc->irq_data.affinity, mask);
+			cpumask_copy(data->affinity, mask);
 		case IRQ_SET_MASK_OK_NOCOPY:
 			irq_set_thread_affinity(desc);
 			ret = 0;
 		}
 	} else {
-		irqd_set_move_pending(&desc->irq_data);
+		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
 	}
 
@@ -175,8 +166,28 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 		kref_get(&desc->affinity_notify->kref);
 		schedule_work(&desc->affinity_notify->work);
 	}
-	irq_compat_set_affinity(desc);
-	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
+	irqd_set(data, IRQD_AFFINITY_SET);
+
+	return ret;
+}
+
+/**
+ *	irq_set_affinity - Set the irq affinity of a given irq
+ *	@irq:		Interrupt to set affinity
+ *	@mask:		cpumask
+ *
+ */
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
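The restructuring above follows the usual kernel naming convention: __irq_set_affinity_locked() is the core that expects desc->lock to be held already, while irq_set_affinity() keeps the old interface, validating the descriptor and wrapping the core in raw_spin_lock_irqsave(). Note also that the missing break after IRQ_SET_MASK_OK is deliberate fallthrough: the OK case copies the mask and then shares the NOCOPY case's thread-affinity update. A minimal sketch of the locked-core/wrapper split, with pthreads standing in for the raw spinlock (hypothetical counter example, not kernel code):

#include <pthread.h>

struct counter {
	pthread_mutex_t lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	long value;
};

/* Core operation: caller must already hold c->lock. */
static long __counter_add_locked(struct counter *c, long delta)
{
	c->value += delta;
	return c->value;
}

/* Public wrapper: validates, locks, delegates, unlocks. */
static long counter_add(struct counter *c, long delta)
{
	long ret;

	if (!c)
		return -1;

	pthread_mutex_lock(&c->lock);
	ret = __counter_add_locked(c, delta);
	pthread_mutex_unlock(&c->lock);
	return ret;
}

The payoff is that code which already holds the lock for other reasons can call the __locked variant directly without deadlocking on a recursive acquire.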
@@ -206,7 +217,7 @@ static void irq_affinity_notify(struct work_struct *work)
 		goto out;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	if (irq_move_pending(desc))
+	if (irq_move_pending(&desc->irq_data))
 		irq_get_pending(cpumask, desc);
 	else
 		cpumask_copy(cpumask, desc->irq_data.affinity);
@@ -285,10 +296,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 		if (cpumask_intersects(desc->irq_data.affinity,
 				       cpu_online_mask))
 			set = desc->irq_data.affinity;
-		else {
-			irq_compat_clr_affinity(desc);
+		else
 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
-		}
 	}
 
 	cpumask_and(mask, cpu_online_mask, set);
@@ -551,9 +560,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	flags &= IRQ_TYPE_SENSE_MASK;
 
 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
-		if (!(desc->istate & IRQS_MASKED))
+		if (!irqd_irq_masked(&desc->irq_data))
 			mask_irq(desc);
-		if (!(desc->istate & IRQS_DISABLED))
+		if (!irqd_irq_disabled(&desc->irq_data))
 			unmask = 1;
 	}
 
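The IRQCHIP_SET_TYPE_MASKED logic reads subtly: when the chip wants the line quiesced while its trigger type is reprogrammed, the line is masked first (unless it already is), and unmask is recorded only if the line is not disabled, so the later unmask cannot accidentally re-enable a disabled interrupt. A condensed sketch of that decision flow, with hypothetical stub callbacks standing in for the irq_chip methods:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for chip->irq_mask/irq_unmask/irq_set_type. */
static void chip_mask(void)               { puts("mask line"); }
static void chip_unmask(void)             { puts("unmask line"); }
static int  chip_set_type(unsigned int t) { printf("type=%u\n", t); return 0; }

/* Mirrors the mask-around-set-type pattern in __irq_set_trigger(). */
static int set_type_masked(bool masked, bool disabled, unsigned int type)
{
	bool unmask = false;
	int ret;

	if (!masked)
		chip_mask();	/* quiesce the line before reprogramming */
	if (!disabled)
		unmask = true;	/* re-enable afterwards only if it was live */

	ret = chip_set_type(type);

	if (unmask)
		chip_unmask();
	return ret;
}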
@@ -575,8 +584,6 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 			irqd_set(&desc->irq_data, IRQD_LEVEL);
 		}
 
-		if (chip != desc->irq_data.chip)
-			irq_chip_set_defaults(desc->irq_data.chip);
 		ret = 0;
 		break;
 	default:
@@ -651,7 +658,7 @@ again:
 	 * irq_wake_thread(). See the comment there which explains the
 	 * serialization.
 	 */
-	if (unlikely(desc->istate & IRQS_INPROGRESS)) {
+	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 		raw_spin_unlock_irq(&desc->lock);
 		chip_bus_sync_unlock(desc);
 		cpu_relax();
@@ -668,12 +675,10 @@ again:
 
 	desc->threads_oneshot &= ~action->thread_mask;
 
-	if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
-	    (desc->istate & IRQS_MASKED)) {
-		irq_compat_clr_masked(desc);
-		desc->istate &= ~IRQS_MASKED;
-		desc->irq_data.chip->irq_unmask(&desc->irq_data);
-	}
+	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
+	    irqd_irq_masked(&desc->irq_data))
+		unmask_irq(desc);
+
 out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(desc);
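The five-line open-coded unmask (clear the compat flag, clear IRQS_MASKED, call chip->irq_unmask) collapses into unmask_irq(), which keeps the IRQD_IRQ_MASKED bookkeeping and the chip callback in one place. From memory, the helper in kernel/irq/chip.c of this era looks approximately like the following, so treat the exact body as an assumption rather than a quote:

/* Sketch of unmask_irq() as of this series; see kernel/irq/chip.c
 * for the authoritative version. */
void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}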
@@ -767,7 +772,7 @@ static int irq_thread(void *data)
 		atomic_inc(&desc->threads_active);
 
 		raw_spin_lock_irq(&desc->lock);
-		if (unlikely(desc->istate & IRQS_DISABLED)) {
+		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
 			/*
 			 * CHECKME: We might need a dedicated
 			 * IRQ_THREAD_PENDING flag here, which
@@ -775,7 +780,6 @@ static int irq_thread(void *data)
 			 * but AFAICT IRQS_PENDING should be fine as it
 			 * retriggers the interrupt itself --- tglx
 			 */
-			irq_compat_set_pending(desc);
 			desc->istate |= IRQS_PENDING;
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
@@ -971,8 +975,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	new->thread_mask = 1 << ffz(thread_mask);
 
 	if (!shared) {
-		irq_chip_set_defaults(desc->irq_data.chip);
-
 		init_waitqueue_head(&desc->wait_for_threads);
 
 		/* Setup the type (level, edge polarity) if configured: */
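Context for the thread_mask line kept at the top of this hunk: each threaded action on a shared oneshot interrupt owns one bit in desc->threads_oneshot, allocated with ffz() (find first zero bit) over the masks of the actions already installed, and cleared in irq_finalize_oneshot() when the thread completes; the line is unmasked only once the word drops to zero. A small userspace sketch of that bit accounting, emulating the kernel's ffz() with a GCC builtin (illustrative only):

#include <stdbool.h>

static unsigned long threads_oneshot;	/* one bit per in-flight thread */

/* Emulates the kernel's ffz(): index of the first zero bit.
 * Undefined when every bit is set, as ffz() itself would be. */
static int first_zero_bit(unsigned long x)
{
	return __builtin_ctzl(~x);
}

/* __setup_irq(): hand a new action the first bit not used by any
 * action already on the line ('used' is the OR of their masks). */
static unsigned long alloc_thread_mask(unsigned long used)
{
	return 1UL << first_zero_bit(used);
}

/* irq_finalize_oneshot(): drop our bit; unmask only when nobody
 * else is still running and the line is masked but not disabled. */
static bool thread_done_should_unmask(unsigned long thread_mask,
				      bool disabled, bool masked)
{
	threads_oneshot &= ~thread_mask;
	return !threads_oneshot && !disabled && masked;
}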
@@ -985,8 +987,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	}
 
 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
-				  IRQS_INPROGRESS | IRQS_ONESHOT | \
-				  IRQS_WAITING);
+				  IRQS_ONESHOT | IRQS_WAITING);
+		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 
 	if (new->flags & IRQF_PERCPU) {
 		irqd_set(&desc->irq_data, IRQD_PER_CPU);
@@ -1049,6 +1051,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
+	free_cpumask_var(mask);
 
 	return 0;
 
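The one functional addition at the end is easy to miss: free_cpumask_var(mask) on the success path. __setup_irq() allocates a temporary cpumask near its start, and before this change only the error paths released it, so CONFIG_CPUMASK_OFFSTACK=y kernels would leak the allocation on every successful request. (With OFFSTACK disabled, cpumask_var_t lives on the stack and alloc/free are no-ops, which is presumably why the leak went unnoticed.) The pairing discipline, as a fragment in kernel style rather than standalone code:

static int example_setup(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* ... compute and apply an affinity mask ... */

	free_cpumask_var(mask);	/* on *all* exit paths, not just errors */
	return 0;
}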