author    Jeremy Fitzhardinge <jeremy@goop.org>    2008-07-23 16:28:58 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-07-24 06:31:51 -0400
commit    d5de8841355a48f7f634a04507185eaf1f9755e3 (patch)
tree      48f9dbde3b77a15c2c6420d805b7a955a98f52f3 /arch
parent    338b9bb3adac0d2c5a1e180491d9b001d624c402 (diff)
x86: split spinlock implementations out into their own files
ftrace requires certain low-level code, like spinlocks and timestamps,
to be compiled without -pg in order to avoid infinite recursion. This
patch splits out the core paravirt spinlocks and the Xen spinlocks into
separate files which can be compiled without -pg.

Also do xen/time.c while we're about it.

As a result, we can now use ftrace within a Xen domain.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
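For readers unfamiliar with the Kbuild mechanism the patch relies on: a
per-object CFLAGS_REMOVE_<object>.o assignment strips the named flags from
just that file, which is how individual files opt out of the -pg
instrumentation that ftrace's mcount hooks depend on. A minimal sketch,
where foo.o is a placeholder and not a file touched by this patch:

ifdef CONFIG_FTRACE
# build foo.o without the mcount profiling calls inserted by -pg,
# so ftrace can safely call into it without recursing
CFLAGS_REMOVE_foo.o = -pg
endif
obj-y += foo.o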
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/Makefile               4
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c  31
-rw-r--r--  arch/x86/kernel/paravirt.c            23
-rw-r--r--  arch/x86/xen/Makefile                  8
-rw-r--r--  arch/x86/xen/smp.c                   167
-rw-r--r--  arch/x86/xen/spinlock.c              183
-rw-r--r--  arch/x86/xen/xen-ops.h                 3
7 files changed, 226 insertions(+), 193 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3db651fc8ec5..d679cb2c79b4 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -10,7 +10,7 @@ ifdef CONFIG_FTRACE
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
-CFLAGS_REMOVE_paravirt.o = -pg
+CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 endif
 
 #
@@ -89,7 +89,7 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
 obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST) += kvm.o
 obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
new file mode 100644
index 000000000000..38d7f7f1dbc9
--- /dev/null
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -0,0 +1,31 @@
+/*
+ * Split spinlock implementation out into its own file, so it can be
+ * compiled in a FTRACE-compatible way.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <asm/paravirt.h>
+
+struct pv_lock_ops pv_lock_ops = {
+#ifdef CONFIG_SMP
+	.spin_is_locked = __ticket_spin_is_locked,
+	.spin_is_contended = __ticket_spin_is_contended,
+
+	.spin_lock = __ticket_spin_lock,
+	.spin_trylock = __ticket_spin_trylock,
+	.spin_unlock = __ticket_spin_unlock,
+#endif
+};
+EXPORT_SYMBOL_GPL(pv_lock_ops);
+
+void __init paravirt_use_bytelocks(void)
+{
+#ifdef CONFIG_SMP
+	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
+	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
+	pv_lock_ops.spin_lock = __byte_spin_lock;
+	pv_lock_ops.spin_trylock = __byte_spin_trylock;
+	pv_lock_ops.spin_unlock = __byte_spin_unlock;
+#endif
+}
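For context on how the structure above is consumed: lock-acquisition sites
reach these operations indirectly, so the ticket, byte or Xen implementations
can be swapped at boot. A rough sketch of that indirection with hypothetical
wrapper names (the real wrappers live in the x86 paravirt headers and go
through the PVOP call machinery, not plain function pointers as shown here):

/* Sketch only; the example_* names are illustrative, not from the tree. */
static inline int example_spin_trylock(struct raw_spinlock *lock)
{
	return pv_lock_ops.spin_trylock(lock);
}

static inline void example_spin_lock(struct raw_spinlock *lock)
{
	pv_lock_ops.spin_lock(lock);	/* ticket, byte or Xen variant */
}

static inline void example_spin_unlock(struct raw_spinlock *lock)
{
	pv_lock_ops.spin_unlock(lock);
}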
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 94da4d52d798..0d71de9ff56d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -268,17 +268,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-	pv_lock_ops.spin_lock = __byte_spin_lock;
-	pv_lock_ops.spin_trylock = __byte_spin_trylock;
-	pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -461,18 +450,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_fixmap = native_set_fixmap,
 };
 
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
-	.spin_is_locked = __ticket_spin_is_locked,
-	.spin_is_contended = __ticket_spin_is_contended,
-
-	.spin_lock = __ticket_spin_lock,
-	.spin_trylock = __ticket_spin_trylock,
-	.spin_unlock = __ticket_spin_unlock,
-#endif
-};
-EXPORT_SYMBOL_GPL(pv_lock_ops);
-
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL (pv_cpu_ops);
 EXPORT_SYMBOL (pv_mmu_ops);
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 59c1e539aed2..5bfee243cf9a 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,4 +1,10 @@
+ifdef CONFIG_FTRACE
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_spinlock.o = -pg
+CFLAGS_REMOVE_time.o = -pg
+endif
+
 obj-y := enlighten.o setup.o multicalls.o mmu.o \
 	time.o xen-asm_$(BITS).o grant-table.o suspend.o
 
-obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smp.o spinlock.o
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d8faf79a0a1d..baca7f2fbd8a 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -15,7 +15,6 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -36,8 +35,6 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static void __cpuinit xen_init_lock_cpu(int cpu);
-
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -419,170 +416,6 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-struct xen_spinlock {
-	unsigned char lock; /* 0 -> free; 1 -> locked */
-	unsigned short spinners; /* count of waiting cpus */
-};
-
-static int xen_spin_is_locked(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	return xl->lock != 0;
-}
-
-static int xen_spin_is_contended(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	/* Not strictly true; this is only the count of contended
-	   lock-takers entering the slow path. */
-	return xl->spinners != 0;
-}
-
-static int xen_spin_trylock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	u8 old = 1;
-
-	asm("xchgb %b0,%1"
-	    : "+q" (old), "+m" (xl->lock) : : "memory");
-
-	return old == 0;
-}
-
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-
-static inline void spinning_lock(struct xen_spinlock *xl)
-{
-	__get_cpu_var(lock_spinners) = xl;
-	wmb(); /* set lock of interest before count */
-	asm(LOCK_PREFIX " incw %0"
-	    : "+m" (xl->spinners) : : "memory");
-}
-
-static inline void unspinning_lock(struct xen_spinlock *xl)
-{
-	asm(LOCK_PREFIX " decw %0"
-	    : "+m" (xl->spinners) : : "memory");
-	wmb(); /* decrement count before clearing lock */
-	__get_cpu_var(lock_spinners) = NULL;
-}
-
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	int irq = __get_cpu_var(lock_kicker_irq);
-	int ret;
-
-	/* If kicker interrupts not initialized yet, just spin */
-	if (irq == -1)
-		return 0;
-
-	/* announce we're spinning */
-	spinning_lock(xl);
-
-	/* clear pending */
-	xen_clear_irq_pending(irq);
-
-	/* check again make sure it didn't become free while
-	   we weren't looking */
-	ret = xen_spin_trylock(lock);
-	if (ret)
-		goto out;
-
-	/* block until irq becomes pending */
-	xen_poll_irq(irq);
-	kstat_this_cpu.irqs[irq]++;
-
-out:
-	unspinning_lock(xl);
-	return ret;
-}
-
-static void xen_spin_lock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	int timeout;
-	u8 oldval;
-
-	do {
-		timeout = 1 << 10;
-
-		asm("1: xchgb %1,%0\n"
-		    " testb %1,%1\n"
-		    " jz 3f\n"
-		    "2: rep;nop\n"
-		    " cmpb $0,%0\n"
-		    " je 1b\n"
-		    " dec %2\n"
-		    " jnz 2b\n"
-		    "3:\n"
-		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
-		    : "1" (1)
-		    : "memory");
-
-	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
-}
-
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		/* XXX should mix up next cpu selection */
-		if (per_cpu(lock_spinners, cpu) == xl) {
-			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-			break;
-		}
-	}
-}
-
-static void xen_spin_unlock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	smp_wmb(); /* make sure no writes get moved after unlock */
-	xl->lock = 0; /* release lock */
-
-	/* make sure unlock happens before kick */
-	barrier();
-
-	if (unlikely(xl->spinners))
-		xen_spin_unlock_slow(xl);
-}
-
-static __cpuinit void xen_init_lock_cpu(int cpu)
-{
-	int irq;
-	const char *name;
-
-	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
-	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
-				     cpu,
-				     xen_reschedule_interrupt,
-				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
-				     name,
-				     NULL);
-
-	if (irq >= 0) {
-		disable_irq(irq); /* make sure it's never delivered */
-		per_cpu(lock_kicker_irq, cpu) = irq;
-	}
-
-	printk("cpu %d spinlock event irq %d\n", cpu, irq);
-}
-
-static void __init xen_init_spinlocks(void)
-{
-	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
-	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
-	pv_lock_ops.spin_lock = xen_spin_lock;
-	pv_lock_ops.spin_trylock = xen_spin_trylock;
-	pv_lock_ops.spin_unlock = xen_spin_unlock;
-}
-
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
new file mode 100644
index 000000000000..8dc4d31da67f
--- /dev/null
+++ b/arch/x86/xen/spinlock.c
@@ -0,0 +1,183 @@
+/*
+ * Split spinlock implementation out into its own file, so it can be
+ * compiled in a FTRACE-compatible way.
+ */
+#include <linux/kernel_stat.h>
+#include <linux/spinlock.h>
+
+#include <asm/paravirt.h>
+
+#include <xen/interface/xen.h>
+#include <xen/events.h>
+
+#include "xen-ops.h"
+
+struct xen_spinlock {
+	unsigned char lock; /* 0 -> free; 1 -> locked */
+	unsigned short spinners; /* count of waiting cpus */
+};
+
+static int xen_spin_is_locked(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	return xl->lock != 0;
+}
+
+static int xen_spin_is_contended(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	/* Not strictly true; this is only the count of contended
+	   lock-takers entering the slow path. */
+	return xl->spinners != 0;
+}
+
+static int xen_spin_trylock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %b0,%1"
+	    : "+q" (old), "+m" (xl->lock) : : "memory");
+
+	return old == 0;
+}
+
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
+
+static inline void spinning_lock(struct xen_spinlock *xl)
+{
+	__get_cpu_var(lock_spinners) = xl;
+	wmb(); /* set lock of interest before count */
+	asm(LOCK_PREFIX " incw %0"
+	    : "+m" (xl->spinners) : : "memory");
+}
+
+static inline void unspinning_lock(struct xen_spinlock *xl)
+{
+	asm(LOCK_PREFIX " decw %0"
+	    : "+m" (xl->spinners) : : "memory");
+	wmb(); /* decrement count before clearing lock */
+	__get_cpu_var(lock_spinners) = NULL;
+}
+
+static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int irq = __get_cpu_var(lock_kicker_irq);
+	int ret;
+
+	/* If kicker interrupts not initialized yet, just spin */
+	if (irq == -1)
+		return 0;
+
+	/* announce we're spinning */
+	spinning_lock(xl);
+
+	/* clear pending */
+	xen_clear_irq_pending(irq);
+
+	/* check again make sure it didn't become free while
+	   we weren't looking */
+	ret = xen_spin_trylock(lock);
+	if (ret)
+		goto out;
+
+	/* block until irq becomes pending */
+	xen_poll_irq(irq);
+	kstat_this_cpu.irqs[irq]++;
+
+out:
+	unspinning_lock(xl);
+	return ret;
+}
+
+static void xen_spin_lock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int timeout;
+	u8 oldval;
+
+	do {
+		timeout = 1 << 10;
+
+		asm("1: xchgb %1,%0\n"
+		    " testb %1,%1\n"
+		    " jz 3f\n"
+		    "2: rep;nop\n"
+		    " cmpb $0,%0\n"
+		    " je 1b\n"
+		    " dec %2\n"
+		    " jnz 2b\n"
+		    "3:\n"
+		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
+		    : "1" (1)
+		    : "memory");
+
+	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
+}
+
+static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		/* XXX should mix up next cpu selection */
+		if (per_cpu(lock_spinners, cpu) == xl) {
+			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+			break;
+		}
+	}
+}
+
+static void xen_spin_unlock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	smp_wmb(); /* make sure no writes get moved after unlock */
+	xl->lock = 0; /* release lock */
+
+	/* make sure unlock happens before kick */
+	barrier();
+
+	if (unlikely(xl->spinners))
+		xen_spin_unlock_slow(xl);
+}
+
+static irqreturn_t dummy_handler(int irq, void *dev_id)
+{
+	BUG();
+	return IRQ_HANDLED;
+}
+
+void __cpuinit xen_init_lock_cpu(int cpu)
+{
+	int irq;
+	const char *name;
+
+	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+				     cpu,
+				     dummy_handler,
+				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				     name,
+				     NULL);
+
+	if (irq >= 0) {
+		disable_irq(irq); /* make sure it's never delivered */
+		per_cpu(lock_kicker_irq, cpu) = irq;
+	}
+
+	printk("cpu %d spinlock event irq %d\n", cpu, irq);
+}
+
+void __init xen_init_spinlocks(void)
+{
+	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+	pv_lock_ops.spin_lock = xen_spin_lock;
+	pv_lock_ops.spin_trylock = xen_spin_trylock;
+	pv_lock_ops.spin_unlock = xen_spin_unlock;
+}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index dd3c23152a2e..8847fb34f17e 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -50,6 +50,9 @@ void __init xen_setup_vcpu_info_placement(void);
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
+void __init xen_init_spinlocks(void);
+__cpuinit void xen_init_lock_cpu(int cpu);
+
 extern cpumask_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}