author     Ingo Molnar <mingo@elte.hu>    2009-01-15 08:15:53 -0500
committer  Ingo Molnar <mingo@elte.hu>    2009-01-16 08:20:31 -0500
commit     6dbde3530850d4d8bfc1b6bd4006d92786a2787f
tree       08c6dd55e860827311b889e2ecfe3de9f51421a0 /arch/x86/xen/irq.c
parent     004aa322f855a765741d9437a98dd8fe2e4f32a6
percpu: add optimized generic percpu accessors
This patch is both an optimization and a cleanup; it adds the following
new generic percpu methods:
percpu_read()
percpu_write()
percpu_add()
percpu_sub()
percpu_and()
percpu_or()
percpu_xor()
and implements support for them on x86. (other architectures will fall
back to a default implementation)
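For architectures without an optimized implementation, the fallback has
to pin the task to one CPU for the duration of the access. A minimal
sketch of the idea (illustrative only, assuming the __get_cpu_var()
interface of this era; not the exact definition in
include/linux/percpu.h):

  /* Illustrative sketch: a generic percpu_read() fallback must
   * disable preemption so the task cannot migrate between
   * resolving the per-CPU address and loading the value. */
  #define percpu_read_sketch(var)			\
  ({							\
  	typeof(__get_cpu_var(var)) __val;		\
  	preempt_disable();				\
  	__val = __get_cpu_var(var);			\
  	preempt_enable();				\
  	__val;						\
  })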
The advantage is that, for example, reading a local percpu variable
used to require this sequence:
return __get_cpu_var(var);
ffffffff8102ca2b: 48 8b 14 fd 80 09 74 mov -0x7e8bf680(,%rdi,8),%rdx
ffffffff8102ca32: 81
ffffffff8102ca33: 48 c7 c0 d8 59 00 00 mov $0x59d8,%rax
ffffffff8102ca3a: 48 8b 04 10 mov (%rax,%rdx,1),%rax
We can get a single instruction by using the optimized variants:
return percpu_read(var);
ffffffff8102ca3f: 65 48 8b 05 91 8f fd mov %gs:0x7efd8f91(%rip),%rax
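The single instruction comes from addressing per-CPU data through a
segment register. A hedged sketch of the idea (not the kernel's exact
macro, which also dispatches on operand size and uses %fs on 32-bit):

  /* Illustrative sketch: a %gs-relative load is one instruction.
   * 'var' stands in for the raw per-CPU symbol; the real x86 macro
   * pastes on a prefix and handles 1/2/4/8-byte operands. */
  #define x86_percpu_read_sketch(var)			\
  ({							\
  	typeof(var) __ret;				\
  	asm("mov %%gs:%1, %0"				\
  	    : "=r" (__ret)				\
  	    : "m" (var));				\
  	__ret;						\
  })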
I also cleaned up the x86-specific APIs and made the x86 code use
these new generic percpu primitives.
tj: * fixed generic percpu_sub() definition as Roel Kluin pointed out
* added percpu_and() for completeness' sake
* made generic percpu ops atomic against preemption
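The preemption point deserves emphasis: for the read-modify-write ops,
the whole update must happen on one CPU, not just the load. A hedged
sketch of what the generic side could look like (again illustrative,
not the exact kernel definition):

  /* Illustrative sketch: the entire read-modify-write is bracketed
   * by preempt_disable()/preempt_enable(), so the update cannot be
   * split across two CPUs by a migration. */
  #define percpu_add_sketch(var, val)			\
  do {							\
  	preempt_disable();				\
  	__get_cpu_var(var) += (val);			\
  	preempt_enable();				\
  } while (0)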
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86/xen/irq.c')
-rw-r--r--    arch/x86/xen/irq.c    8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index bb042608c602..2e8271431e1a 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void)
 	struct vcpu_info *vcpu;
 	unsigned long flags;
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 
 	/* flag has opposite sense of mask */
 	flags = !vcpu->evtchn_upcall_mask;
@@ -62,7 +62,7 @@ static void xen_restore_fl(unsigned long flags)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = flags;
 	preempt_enable_no_resched();
 
@@ -83,7 +83,7 @@ static void xen_irq_disable(void)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
 
@@ -96,7 +96,7 @@ static void xen_irq_enable(void)
 	   the caller is confused and is trying to re-enable interrupts
 	   on an indeterminate processor. */
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = 0;
 
 	/* Doesn't matter if we get preempted here, because any