author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 12:58:24 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 12:58:24 -0500
commit     d0316554d3586cbea60592a41391b5def2553d6f (patch)
tree       5e7418f0bacbc68cec5dfd1541e03eb56870aa02 /arch/x86/xen
parent     fb0bbb92d42d5bd0ab224605444efdfed06d6934 (diff)
parent     51e99be00ce2713cbb841cedc997cafa6e26c7f4 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
m68k: rename global variable vmalloc_end to m68k_vmalloc_end
percpu: add missing per_cpu_ptr_to_phys() definition for UP
percpu: Fix kdump failure if booted with percpu_alloc=page
percpu: make misc percpu symbols unique
percpu: make percpu symbols in ia64 unique
percpu: make percpu symbols in powerpc unique
percpu: make percpu symbols in x86 unique
percpu: make percpu symbols in xen unique
percpu: make percpu symbols in cpufreq unique
percpu: make percpu symbols in oprofile unique
percpu: make percpu symbols in tracer unique
percpu: make percpu symbols under kernel/ and mm/ unique
percpu: remove some sparse warnings
percpu: make alloc_percpu() handle array types
vmalloc: fix use of non-existent percpu variable in put_cpu_var()
this_cpu: Use this_cpu_xx in trace_functions_graph.c
this_cpu: Use this_cpu_xx for ftrace
this_cpu: Use this_cpu_xx in nmi handling
this_cpu: Use this_cpu operations in RCU
this_cpu: Use this_cpu ops for VM statistics
...
Fix up trivial (famous last words) global per-cpu naming conflicts in
	arch/x86/kvm/svm.c
	mm/slab.c
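The pattern behind the rename series is visible throughout the diff below: generically named per-cpu variables (resched_irq, runstate, ...) gain a subsystem prefix so their symbols stay unique tree-wide, avoiding exactly the kind of collision that had to be fixed up by hand in svm.c and slab.c above. A minimal sketch of the convention, assuming only the DEFINE_PER_CPU/per_cpu usage shown in the hunks below (the helper function is hypothetical):

	#include <linux/percpu.h>

	/* Before: a generic name, collision-prone once per-cpu symbols
	 * share a namespace with ordinary global and local symbols:
	 *
	 *     static DEFINE_PER_CPU(int, resched_irq);
	 *
	 * After: prefix with the owning subsystem to keep it unique. */
	static DEFINE_PER_CPU(int, xen_resched_irq);

	/* Hypothetical helper; accessor sites rename mechanically along
	 * with the definition. */
	static void xen_store_resched_irq(unsigned int cpu, int irq)
	{
		per_cpu(xen_resched_irq, cpu) = irq;
	}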
Diffstat (limited to 'arch/x86/xen')

-rw-r--r--  arch/x86/xen/smp.c   41
-rw-r--r--  arch/x86/xen/time.c  24

2 files changed, 33 insertions, 32 deletions
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 64757c0ba5fc..563d20504988 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,10 +35,10 @@
 
 cpumask_var_t xen_cpu_initialized_map;
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, callfuncsingle_irq);
-static DEFINE_PER_CPU(int, debug_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq);
+static DEFINE_PER_CPU(int, xen_callfunc_irq);
+static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(resched_irq, cpu) = rc;
+	per_cpu(xen_resched_irq, cpu) = rc;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(callfunc_irq, cpu) = rc;
+	per_cpu(xen_callfunc_irq, cpu) = rc;
 
 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 				     debug_name, NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(debug_irq, cpu) = rc;
+	per_cpu(xen_debug_irq, cpu) = rc;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(callfuncsingle_irq, cpu) = rc;
+	per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
 	return 0;
 
  fail:
-	if (per_cpu(resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	if (per_cpu(callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	if (per_cpu(debug_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+	if (per_cpu(xen_resched_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+	if (per_cpu(xen_debug_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
+				       NULL);
 
 	return rc;
 }
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
 		current->state = TASK_UNINTERRUPTIBLE;
 		schedule_timeout(HZ/10);
 	}
-	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
 	xen_uninit_lock_cpu(cpu);
 	xen_teardown_timer(cpu);
 
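Both smp.c hunks rely on the same bookkeeping pattern: the IRQ number returned by bind_ipi_to_irqhandler()/bind_virq_to_irqhandler() is recorded in a per-CPU slot so the fail path of xen_smp_intr_init() and the teardown in xen_cpu_die() can unbind it later. A minimal sketch of that pattern with the renamed symbols (the teardown helper is hypothetical; note that in the diff only xen_debug_irq is pre-initialized to -1):

	#include <linux/percpu.h>
	#include <xen/events.h>

	static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

	/* Hypothetical teardown helper mirroring the fail path above:
	 * unbind only slots that actually hold a bound IRQ (>= 0), and
	 * mark the slot free again afterwards. */
	static void xen_release_debug_irq(unsigned int cpu)
	{
		if (per_cpu(xen_debug_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu),
					       NULL);
			per_cpu(xen_debug_irq, cpu) = -1;
		}
	}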
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 9d1f853120d8..0d3f07cd1b5f 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,14 +31,14 @@
 #define NS_PER_TICK	(1000000000LL / HZ)
 
 /* runstate info updated by Xen */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
 
 /* snapshots of runstate info */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
 /* unused ns of stolen and blocked time */
-static DEFINE_PER_CPU(u64, residual_stolen);
-static DEFINE_PER_CPU(u64, residual_blocked);
+static DEFINE_PER_CPU(u64, xen_residual_stolen);
+static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
 /* return an consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
 	BUG_ON(preemptible());
 
-	state = &__get_cpu_var(runstate);
+	state = &__get_cpu_var(xen_runstate);
 
 	/*
 	 * The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 /* return true when a vcpu could run but has no real cpu to run on */
 bool xen_vcpu_stolen(int vcpu)
 {
-	return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
 }
 
 void xen_setup_runstate_info(int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
 
-	area.addr.v = &per_cpu(runstate, cpu);
+	area.addr.v = &per_cpu(xen_runstate, cpu);
 
 	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
 			       cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
 
 	WARN_ON(state.state != RUNSTATE_running);
 
-	snap = &__get_cpu_var(runstate_snapshot);
+	snap = &__get_cpu_var(xen_runstate_snapshot);
 
 	/* work out how much time the VCPU has not been runn*ing* */
 	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
 
 	/* Add the appropriate number of ticks of stolen time,
 	   including any left-overs from last time. */
-	stolen = runnable + offline + __get_cpu_var(residual_stolen);
+	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
 
 	if (stolen < 0)
 		stolen = 0;
 
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-	__get_cpu_var(residual_stolen) = stolen;
+	__get_cpu_var(xen_residual_stolen) = stolen;
 	account_steal_ticks(ticks);
 
 	/* Add the appropriate number of ticks of blocked time,
 	   including any left-overs from last time. */
-	blocked += __get_cpu_var(residual_blocked);
+	blocked += __get_cpu_var(xen_residual_blocked);
 
 	if (blocked < 0)
 		blocked = 0;
 
 	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__get_cpu_var(residual_blocked) = blocked;
+	__get_cpu_var(xen_residual_blocked) = blocked;
 	account_idle_ticks(ticks);
 }
 
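The do_stolen_accounting() hunk shows why xen_residual_stolen and xen_residual_blocked are per-CPU: runstate deltas arrive as nanoseconds, only whole ticks can be accounted, and the sub-tick remainder has to be carried to the next invocation on the same CPU. A minimal sketch of that divide-and-carry step, reusing NS_PER_TICK as defined at the top of time.c (the helper name is hypothetical; like the original, it assumes the caller runs with preemption disabled):

	#include <linux/kernel.h>
	#include <linux/math64.h>
	#include <linux/percpu.h>

	#define NS_PER_TICK	(1000000000LL / HZ)

	static DEFINE_PER_CPU(u64, xen_residual_stolen);

	/* Fold newly observed stolen nanoseconds into the per-CPU
	 * residual, peel off whole ticks, and keep the remainder for
	 * the next call. */
	static unsigned long xen_fold_stolen_ns(s64 delta_ns)
	{
		unsigned long ticks;
		u64 rem;
		s64 stolen = delta_ns + __get_cpu_var(xen_residual_stolen);

		/* runstate deltas can transiently be negative; clamp */
		if (stolen < 0)
			stolen = 0;

		rem = stolen;
		ticks = iter_div_u64_rem(rem, NS_PER_TICK, &rem);
		__get_cpu_var(xen_residual_stolen) = rem;

		return ticks;
	}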