author     Paul Mackerras <paulus@samba.org>    2005-10-10 08:50:37 -0400
committer  Paul Mackerras <paulus@samba.org>    2005-10-10 08:50:37 -0400
commit     40ef8cbc6d360e564573eb19582249c35d8ba330 (patch)
tree       abba70b7da8bef93a87431691dc8df79eb4425d5 /arch/powerpc/lib/locks.c
parent     bc6f8a4b199156897f6eb5b70bf5c1a4773f4e2b (diff)
powerpc: Get 64-bit configs to compile with ARCH=powerpc
This is a bunch of mostly small fixes that are needed to get
ARCH=powerpc to compile for 64-bit. This adds setup_64.c from
arch/ppc64/kernel/setup.c and locks.c from arch/ppc64/lib/locks.c.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/lib/locks.c')
-rw-r--r--    arch/powerpc/lib/locks.c    95
1 file changed, 95 insertions, 0 deletions
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
new file mode 100644
index 000000000000..4b8c5ad5e7dc
--- /dev/null
+++ b/arch/powerpc/lib/locks.c
@@ -0,0 +1,95 @@
+/*
+ * Spin and read/write lock operations.
+ *
+ * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
+ *   Rework to support virtual processors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+
+/* waiting for a spinlock... */
+#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#include <asm/hvcall.h>
+#include <asm/iSeries/HvCall.h>
+
+void __spin_yield(raw_spinlock_t *lock)
+{
+        unsigned int lock_value, holder_cpu, yield_count;
+        struct paca_struct *holder_paca;
+
+        lock_value = lock->slock;
+        if (lock_value == 0)
+                return;
+        holder_cpu = lock_value & 0xffff;
+        BUG_ON(holder_cpu >= NR_CPUS);
+        holder_paca = &paca[holder_cpu];
+        yield_count = holder_paca->lppaca.yield_count;
+        if ((yield_count & 1) == 0)
+                return;         /* virtual cpu is currently running */
+        rmb();
+        if (lock->slock != lock_value)
+                return;         /* something has changed */
+#ifdef CONFIG_PPC_ISERIES
+        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
+                ((u64)holder_cpu << 32) | yield_count);
+#else
+        plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
+                           yield_count);
+#endif
+}
+
+/*
+ * Waiting for a read lock or a write lock on a rwlock...
+ * This turns out to be the same for read and write locks, since
+ * we only know the holder if it is write-locked.
+ */
+void __rw_yield(raw_rwlock_t *rw)
+{
+        int lock_value;
+        unsigned int holder_cpu, yield_count;
+        struct paca_struct *holder_paca;
+
+        lock_value = rw->lock;
+        if (lock_value >= 0)
+                return;         /* no write lock at present */
+        holder_cpu = lock_value & 0xffff;
+        BUG_ON(holder_cpu >= NR_CPUS);
+        holder_paca = &paca[holder_cpu];
+        yield_count = holder_paca->lppaca.yield_count;
+        if ((yield_count & 1) == 0)
+                return;         /* virtual cpu is currently running */
+        rmb();
+        if (rw->lock != lock_value)
+                return;         /* something has changed */
+#ifdef CONFIG_PPC_ISERIES
+        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
+                ((u64)holder_cpu << 32) | yield_count);
+#else
+        plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
+                           yield_count);
+#endif
+}
+#endif
+
+void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+        while (lock->slock) {
+                HMT_low();
+                if (SHARED_PROCESSOR)
+                        __spin_yield(lock);
+        }
+        HMT_medium();
+}
+
+EXPORT_SYMBOL(__raw_spin_unlock_wait);
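
For context, these yield helpers only matter when called from a busy-wait loop: the waiter spins at low SMT priority and, on a shared-processor partition, confers its cycles to the lock holder's virtual processor so the holder can run and release the lock. The sketch below shows how an arch spin-lock slow path could drive __spin_yield(), following the same pattern as the __raw_spin_unlock_wait() loop in the file above. It is a sketch only, not part of this commit: __spin_trylock() is an assumed primitive (returning 0 on success) that is not defined in this file, while SHARED_PROCESSOR, HMT_low() and HMT_medium() are used exactly as they appear in the code above.

/*
 * Sketch only: a possible spin-lock slow path built on __spin_yield().
 * __spin_trylock() is an assumed helper, not defined in locks.c.
 */
static inline void example_spin_lock(raw_spinlock_t *lock)
{
        while (1) {
                if (likely(__spin_trylock(lock) == 0))
                        break;                  /* got the lock */
                do {
                        HMT_low();              /* drop SMT priority while spinning */
                        if (SHARED_PROCESSOR)   /* holder may be preempted by the hypervisor */
                                __spin_yield(lock);     /* confer our cycles to the holder */
                } while (unlikely(lock->slock != 0));
                HMT_medium();                   /* restore priority before retrying */
        }
}

Note the design choice visible in __spin_yield() and __rw_yield(): as the in-code comments say, the hcall is skipped when lppaca.yield_count is even (the holder's virtual cpu is currently running), so the hypervisor is only asked to redirect cycles when the holder has actually been preempted, and the lock word is re-checked after the rmb() to avoid yielding to a stale holder.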