diff options
author | David Vrabel <david.vrabel@citrix.com> | 2013-03-13 09:20:52 -0400 |
---|---|---|
committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2014-01-06 10:07:33 -0500 |
commit | fc087e10734a4d3e40693fc099461ec1270b3fff (patch) | |
tree | 801838375708e2053b413bbfd75a758c9421e93b | |
parent | 872951850666689e931e567ebdc7c483135d14cf (diff) |
xen/events: remove unnecessary init_evtchn_cpu_bindings()
Because the guest-side binding of an event to a VCPU (i.e., setting
the local per-cpu masks) is always explicitly done after an event
channel is bound to a port, there is no need to initialize all
possible events as bound to VCPU 0 at start of day or after a resume.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-rw-r--r-- | drivers/xen/events.c | 22 |
1 file changed, 0 insertions, 22 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index ddcdbb508dab..1e2c74bcd0c8 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -334,24 +334,6 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
334 | info_for_irq(irq)->cpu = cpu; | 334 | info_for_irq(irq)->cpu = cpu; |
335 | } | 335 | } |
336 | 336 | ||
337 | static void init_evtchn_cpu_bindings(void) | ||
338 | { | ||
339 | int i; | ||
340 | #ifdef CONFIG_SMP | ||
341 | struct irq_info *info; | ||
342 | |||
343 | /* By default all event channels notify CPU#0. */ | ||
344 | list_for_each_entry(info, &xen_irq_list_head, list) { | ||
345 | struct irq_desc *desc = irq_to_desc(info->irq); | ||
346 | cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); | ||
347 | } | ||
348 | #endif | ||
349 | |||
350 | for_each_possible_cpu(i) | ||
351 | memset(per_cpu(cpu_evtchn_mask, i), | ||
352 | (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); | ||
353 | } | ||
354 | |||
355 | static inline void clear_evtchn(int port) | 337 | static inline void clear_evtchn(int port) |
356 | { | 338 | { |
357 | struct shared_info *s = HYPERVISOR_shared_info; | 339 | struct shared_info *s = HYPERVISOR_shared_info; |
@@ -1778,8 +1760,6 @@ void xen_irq_resume(void) | |||
1778 | unsigned int cpu, evtchn; | 1760 | unsigned int cpu, evtchn; |
1779 | struct irq_info *info; | 1761 | struct irq_info *info; |
1780 | 1762 | ||
1781 | init_evtchn_cpu_bindings(); | ||
1782 | |||
1783 | /* New event-channel space is not 'live' yet. */ | 1763 | /* New event-channel space is not 'live' yet. */ |
1784 | for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) | 1764 | for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) |
1785 | mask_evtchn(evtchn); | 1765 | mask_evtchn(evtchn); |
@@ -1890,8 +1870,6 @@ void __init xen_init_IRQ(void) | |||
1890 | for (i = 0; i < NR_EVENT_CHANNELS; i++) | 1870 | for (i = 0; i < NR_EVENT_CHANNELS; i++) |
1891 | evtchn_to_irq[i] = -1; | 1871 | evtchn_to_irq[i] = -1; |
1892 | 1872 | ||
1893 | init_evtchn_cpu_bindings(); | ||
1894 | |||
1895 | /* No event channels are 'live' right now. */ | 1873 | /* No event channels are 'live' right now. */ |
1896 | for (i = 0; i < NR_EVENT_CHANNELS; i++) | 1874 | for (i = 0; i < NR_EVENT_CHANNELS; i++) |
1897 | mask_evtchn(i); | 1875 | mask_evtchn(i); |