author     Bob Breuer <breuerr@mc.net>            2006-03-24 01:36:19 -0500
committer  David S. Miller <davem@davemloft.net>  2006-03-24 01:36:19 -0500
commit     a54123e27779049d27d21e6c8adfee73aa2c0734 (patch)
tree       265849e706e4ebe3b75127ebe6e3cbfe2a78850a /include/asm-sparc
parent     674a396c6d2ba0341ebdd7c1c9950f32f018e2dd (diff)
[SPARC]: Try to start getting SMP back into shape.
Todo items:
 - IRQ_INPROGRESS flag - use sparc64 irq buckets, or generic irq_desc?
 - sun4d
 - re-indent large chunks of sun4m_smp.c
 - some places assume sequential cpu numbering (i.e. 0,1 instead of 0,2)

Last I checked (with 2.6.14), random programs segfault with dual
HyperSPARC.  And with SuperSPARC II's, it seems stable but will
eventually die from a write lock error (wrong lock owner or something).
I haven't tried the HyperSPARC + highmem combination recently, so that
may still be a problem.

Signed-off-by: David S. Miller <davem@davemloft.net>
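On the last todo item above: code that indexes CPUs with a plain loop
counter breaks as soon as the online ids are not contiguous (e.g. 0 and 2
on a two-CPU sun4m box). A minimal sketch of the safe idiom using the
generic for_each_online_cpu() iterator; do_per_cpu_work() is a placeholder,
not something from this patch:

#include <linux/cpumask.h>	/* for_each_online_cpu(), num_online_cpus() */

/* do_per_cpu_work() stands in for any per-cpu action. */
static void visit_cpus(void (*do_per_cpu_work)(int cpu))
{
	int i;

	/* Fragile: assumes online CPUs are numbered 0..N-1, so it
	 * misses cpu 2 when the online map is {0, 2}. */
	for (i = 0; i < num_online_cpus(); i++)
		do_per_cpu_work(i);

	/* Robust: walk the online cpu map directly. */
	for_each_online_cpu(i)
		do_per_cpu_work(i);
}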
Diffstat (limited to 'include/asm-sparc')
-rw-r--r--  include/asm-sparc/cpudata.h  |  1
-rw-r--r--  include/asm-sparc/smp.h      |  9
-rw-r--r--  include/asm-sparc/spinlock.h | 25
3 files changed, 24 insertions, 11 deletions
diff --git a/include/asm-sparc/cpudata.h b/include/asm-sparc/cpudata.h
index ec0d9ef90a3b..a2c4d51d36c4 100644
--- a/include/asm-sparc/cpudata.h
+++ b/include/asm-sparc/cpudata.h
@@ -18,6 +18,7 @@ typedef struct {
 	unsigned int counter;
 	int prom_node;
 	int mid;
+	int next;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index 580c51d011df..98c46e3fbe8a 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -81,16 +81,9 @@ static inline int smp_call_function(void (*func)(void *info), void *info, int no
 	return 0;
 }
 
-extern __volatile__ int __cpu_number_map[NR_CPUS];
-extern __volatile__ int __cpu_logical_map[NR_CPUS];
-
 static inline int cpu_logical_map(int cpu)
 {
-	return __cpu_logical_map[cpu];
-}
-static inline int cpu_number_map(int cpu)
-{
-	return __cpu_number_map[cpu];
+	return cpu;
 }
 
 static inline int hard_smp4m_processor_id(void)
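With the __cpu_number_map/__cpu_logical_map arrays gone, the
logical/number translation collapses to the identity: a kernel cpu id is
its own logical number, and anything that needs the hardware module id
reads it from the per-cpu data that cpudata.h carries (including the new
'next' field above). A minimal sketch of what a caller sees after this
change; send_to_cpu() is hypothetical and assumes the usual cpu_data()
accessor from cpudata.h:

/* Sketch only: not part of this patch. */
static void send_to_cpu(int cpu)
{
	int logical = cpu_logical_map(cpu);	/* now simply returns cpu */
	int mid = cpu_data(logical).mid;	/* hardware id lives in per-cpu data */

	/* ... raise an IPI towards 'mid' (hardware-specific, omitted) ... */
}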
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h
index e344c98a6f5f..3350c90c7869 100644
--- a/include/asm-sparc/spinlock.h
+++ b/include/asm-sparc/spinlock.h
@@ -94,7 +94,7 @@ static inline void __read_lock(raw_rwlock_t *rw)
 #define __raw_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__raw_read_lock(lock); \
+	__read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
@@ -114,11 +114,11 @@ static inline void __read_unlock(raw_rwlock_t *rw)
 #define __raw_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__raw_read_unlock(lock); \
+	__read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-extern __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -131,9 +131,28 @@ extern __inline__ void __raw_write_lock(raw_rwlock_t *rw)
131 : "g2", "g4", "memory", "cc"); 131 : "g2", "g4", "memory", "cc");
132} 132}
133 133
134static inline int __raw_write_trylock(raw_rwlock_t *rw)
135{
136 unsigned int val;
137
138 __asm__ __volatile__("ldstub [%1 + 3], %0"
139 : "=r" (val)
140 : "r" (&rw->lock)
141 : "memory");
142
143 if (val == 0) {
144 val = rw->lock & ~0xff;
145 if (val)
146 ((volatile u8*)&rw->lock)[3] = 0;
147 }
148
149 return (val == 0);
150}
151
134#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) 152#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
135 153
136#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 154#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
155#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
137 156
138#endif /* !(__ASSEMBLY__) */ 157#endif /* !(__ASSEMBLY__) */
139 158
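Reading the new __raw_write_trylock() against the lock-word layout this
file already uses: the byte at offset 3 (the least significant byte on
big-endian sparc32) is the writer byte, grabbed atomically with ldstub,
while the upper 24 bits carry reader state. If the ldstub returns
non-zero, another writer already owns the byte and the attempt fails; if
it returns zero but `rw->lock & ~0xff` shows readers, the byte is dropped
again and the attempt also fails. A minimal usage sketch under that
reading, for SMP builds where write_trylock() reaches this routine; the
stats_lock/stats_updates pair is hypothetical, not from this patch:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);	/* hypothetical lock/data pair */
static unsigned long stats_updates;

static int try_bump_stats(void)
{
	/* Fail fast instead of spinning when readers or another
	 * writer currently hold stats_lock. */
	if (!write_trylock(&stats_lock))
		return 0;

	stats_updates++;
	write_unlock(&stats_lock);
	return 1;
}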