about summary refs log tree commit diff stats
path: root/drivers/xen/events.c
diff options
context:
space:
mode:
authorIan Campbell <ian.campbell@citrix.com>2011-03-10 11:08:08 -0500
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2011-03-10 14:48:38 -0500
commitcb60d11427bbdd42c27edfa2ef1fa6344f455e1d (patch)
treec97f4a5b70cb530e0a939b16c70f0b29ccdab78f /drivers/xen/events.c
parentf4d0635bf8894b7ba43d7a54733f3e26fe6ced2e (diff)
xen: events: use per-cpu variable for cpu_evtchn_mask
I can't see any reason why it isn't already.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--drivers/xen/events.c28
1 file changed, 8 insertions(+), 20 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a40b2a1c6255..2dffa43696d8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -107,19 +107,9 @@ static struct irq_info *irq_info;
 static int *pirq_to_irq;
 
 static int *evtchn_to_irq;
-struct cpu_evtchn_s {
-	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
-};
-
-static __initdata struct cpu_evtchn_s init_evtchn_mask = {
-	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
-};
-static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;
 
-static inline unsigned long *cpu_evtchn_mask(int cpu)
-{
-	return cpu_evtchn_mask_p[cpu].bits;
-}
+static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+		      cpu_evtchn_mask);
 
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
@@ -257,7 +247,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
 					   unsigned int idx)
 {
 	return (sh->evtchn_pending[idx] &
-		cpu_evtchn_mask(cpu)[idx] &
+		per_cpu(cpu_evtchn_mask, cpu)[idx] &
 		~sh->evtchn_mask[idx]);
 }
 
@@ -270,8 +260,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
-	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
-	set_bit(chn, cpu_evtchn_mask(cpu));
+	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
+	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
 
 	irq_info[irq].cpu = cpu;
 }
@@ -289,8 +279,8 @@ static void init_evtchn_cpu_bindings(void)
 #endif
 
 	for_each_possible_cpu(i)
-		memset(cpu_evtchn_mask(i),
-		       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
+		memset(per_cpu(cpu_evtchn_mask, i),
+		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
 
 }
 
@@ -925,7 +915,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 {
 	struct shared_info *sh = HYPERVISOR_shared_info;
 	int cpu = smp_processor_id();
-	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
+	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
 	int i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(debug_lock);
@@ -1462,8 +1452,6 @@ void __init xen_init_IRQ(void)
 {
 	int i;
 
-	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
-				    GFP_KERNEL);
 	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
 
 	/* We are using nr_irqs as the maximum number of pirq available but