Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/hpux/sys_hpux.c              |  7
-rw-r--r--  arch/parisc/include/asm/asm-offsets.h    |  1
-rw-r--r--  arch/parisc/include/asm/atomic.h         | 10
-rw-r--r--  arch/parisc/include/asm/bug.h            |  4
-rw-r--r--  arch/parisc/include/asm/elf.h            |  1
-rw-r--r--  arch/parisc/include/asm/ftrace.h         | 14
-rw-r--r--  arch/parisc/include/asm/spinlock.h       | 64
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h | 12
-rw-r--r--  arch/parisc/kernel/asm-offsets.c         |  3
-rw-r--r--  arch/parisc/kernel/irq.c                 |  8
-rw-r--r--  arch/parisc/kernel/signal.c              |  5
-rw-r--r--  arch/parisc/kernel/smp.c                 |  9
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c        |  6
-rw-r--r--  arch/parisc/kernel/unwind.c              | 50
-rw-r--r--  arch/parisc/lib/bitops.c                 |  4
15 files changed, 112 insertions, 86 deletions
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 18072e03a019..92343bd35fa3 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -445,12 +445,7 @@ done:
 
 int hpux_pipe(int *kstack_fildes)
 {
-        int error;
-
-        lock_kernel();
-        error = do_pipe_flags(kstack_fildes, 0);
-        unlock_kernel();
-        return error;
+        return do_pipe_flags(kstack_fildes, 0);
 }
 
 /* lies - says it works, but it really didn't lock anything */
diff --git a/arch/parisc/include/asm/asm-offsets.h b/arch/parisc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/parisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 8bc9e96699b2..716634d1f546 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -27,19 +27,19 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {     \
-        raw_spinlock_t *s = ATOMIC_HASH(l);     \
+        arch_spinlock_t *s = ATOMIC_HASH(l);    \
         local_irq_save(f);                      \
-        __raw_spin_lock(s);                     \
+        arch_spin_lock(s);                      \
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {        \
-        raw_spinlock_t *s = ATOMIC_HASH(l);             \
-        __raw_spin_unlock(s);                           \
+        arch_spinlock_t *s = ATOMIC_HASH(l);            \
+        arch_spin_unlock(s);                            \
         local_irq_restore(f);                           \
 } while(0)
 
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 8cfc553fc837..75e46c557a16 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -32,14 +32,14 @@
                 "\t.popsection"                                 \
                 : : "i" (__FILE__), "i" (__LINE__),             \
                 "i" (0), "i" (sizeof(struct bug_entry)) );      \
-                for(;;) ;                                       \
+                unreachable();                                  \
         } while(0)
 
 #else
 #define BUG()                                                   \
         do {                                                    \
                 asm volatile(PARISC_BUG_BREAK_ASM : : );        \
-                for(;;) ;                                       \
+                unreachable();                                  \
         } while(0)
 #endif
 
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 9c802eb4be84..19f6cb1a4a1c 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */
    such function.  */
 #define ELF_PLAT_INIT(_r, load_addr)       _r->gr[23] = 0
 
-#define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE       4096
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 2fa05dd6aeee..72c0fafaa039 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -20,6 +20,20 @@ struct ftrace_ret_stack {
  * Defined in entry.S
  */
 extern void return_to_handler(void);
+
+
+extern unsigned long return_address(unsigned int);
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 return_address(1)
+#define CALLER_ADDR2 return_address(2)
+#define CALLER_ADDR3 return_address(3)
+#define CALLER_ADDR4 return_address(4)
+#define CALLER_ADDR5 return_address(5)
+#define CALLER_ADDR6 return_address(6)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PARISC_FTRACE_H */
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa8..74036f436a3b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
         volatile unsigned int *a = __ldcw_align(x);
         return *a == 0;
 }
 
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
-                do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+                do { cpu_relax(); } while (arch_spin_is_locked(x))
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                          unsigned long flags)
 {
         volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
         mb();
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
         volatile unsigned int *a;
         mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
         mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
         volatile unsigned int *a;
         int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
+        arch_spin_lock_flags(&rw->lock, flags);
         rw->counter++;
-        __raw_spin_unlock(&rw->lock);
+        arch_spin_unlock(&rw->lock);
         local_irq_restore(flags);
 }
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
+        arch_spin_lock_flags(&rw->lock, flags);
         rw->counter--;
-        __raw_spin_unlock(&rw->lock);
+        arch_spin_unlock(&rw->lock);
         local_irq_restore(flags);
 }
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
 {
         unsigned long flags;
  retry:
         local_irq_save(flags);
-        if (__raw_spin_trylock(&rw->lock)) {
+        if (arch_spin_trylock(&rw->lock)) {
                 rw->counter++;
-                __raw_spin_unlock(&rw->lock);
+                arch_spin_unlock(&rw->lock);
                 local_irq_restore(flags);
                 return 1;
         }
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
         return 0;
 
         /* Wait until we have a realistic chance at the lock */
-        while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
                 cpu_relax();
 
         goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned long flags;
 retry:
         local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
+        arch_spin_lock_flags(&rw->lock, flags);
 
         if (rw->counter != 0) {
-                __raw_spin_unlock(&rw->lock);
+                arch_spin_unlock(&rw->lock);
                 local_irq_restore(flags);
 
                 while (rw->counter != 0)
@@ -141,27 +141,27 @@ retry:
         local_irq_restore(flags);
 }
 
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
 {
         rw->counter = 0;
-        __raw_spin_unlock(&rw->lock);
+        arch_spin_unlock(&rw->lock);
 }
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         int result = 0;
 
         local_irq_save(flags);
-        if (__raw_spin_trylock(&rw->lock)) {
+        if (arch_spin_trylock(&rw->lock)) {
                 if (rw->counter == 0) {
                         rw->counter = -1;
                         result = 1;
                 } else {
                         /* Read-locked.  Oh well. */
-                        __raw_spin_unlock(&rw->lock);
+                        arch_spin_unlock(&rw->lock);
                 }
         }
         local_irq_restore(flags);
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
 {
         return rw->counter >= 0;
 }
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 {
         return !rw->counter;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)   cpu_relax()
-#define _raw_read_relax(lock)   cpu_relax()
-#define _raw_write_relax(lock)  cpu_relax()
+#define arch_spin_relax(lock)   cpu_relax()
+#define arch_read_relax(lock)   cpu_relax()
+#define arch_write_relax(lock)  cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 3f72f47cf4b2..8c373aa28a86 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,18 +4,18 @@
 typedef struct {
 #ifdef CONFIG_PA20
         volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 #else
         volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED       { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED      { { 1, 1, 1, 1 } }
 #endif
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 typedef struct {
-        raw_spinlock_t lock;
+        arch_spinlock_t lock;
         volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED          { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED         { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
 
 #endif
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index fcd3c707bf12..ec787b411e9a 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -244,9 +244,6 @@ int main(void)
         DEFINE(THREAD_SZ, sizeof(struct thread_info));
         DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
         BLANK();
-        DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending));
-        DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t));
-        BLANK();
         DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
         DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
         DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 2e7610cb33d5..efbcee5d2220 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -145,7 +145,7 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 #endif
 
 static struct irq_chip cpu_interrupt_type = {
-        .typename       = "CPU",
+        .name           = "CPU",
         .startup        = cpu_startup_irq,
         .shutdown       = cpu_disable_irq,
         .enable         = cpu_enable_irq,
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v)
         if (i < NR_IRQS) {
                 struct irqaction *action;
 
-                spin_lock_irqsave(&irq_desc[i].lock, flags);
+                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                 action = irq_desc[i].action;
                 if (!action)
                         goto skip;
@@ -192,7 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
                 seq_printf(p, "%10u ", kstat_irqs(i));
 #endif
 
-                seq_printf(p, " %14s", irq_desc[i].chip->typename);
+                seq_printf(p, " %14s", irq_desc[i].chip->name);
 #ifndef PARISC_IRQ_CR16_COUNTS
                 seq_printf(p, " %s", action->name);
 
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
                 seq_putc(p, '\n');
  skip:
-                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
         }
 
         return 0;
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index e8467e4aa8d1..35c827e94e31 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -26,7 +26,6 @@
 #include <linux/stddef.h>
 #include <linux/compat.h>
 #include <linux/elf.h>
-#include <linux/tracehook.h>
 #include <asm/ucontext.h>
 #include <asm/rt_sigframe.h>
 #include <asm/uaccess.h>
@@ -469,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
         recalc_sigpending();
         spin_unlock_irq(&current->sighand->siglock);
 
-        tracehook_signal_handler(sig, info, ka, regs, 0);
+        tracehook_signal_handler(sig, info, ka, regs,
+                test_thread_flag(TIF_SINGLESTEP) ||
+                test_thread_flag(TIF_BLOCKSTEP));
 
         return 1;
 }
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 1fd0f0cec037..3f2fce8ce6b6 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -60,8 +60,6 @@ static int smp_debug_lvl = 0;
 #define smp_debug(lvl, ...)     do { } while(0)
 #endif /* DEBUG_SMP */
 
-DEFINE_SPINLOCK(smp_lock);
-
 volatile struct task_struct *smp_init_current_idle_task;
 
 /* track which CPU is booting */
@@ -69,7 +67,7 @@ static volatile int cpu_now_booting __cpuinitdata;
 
 static int parisc_max_cpus __cpuinitdata = 1;
 
-DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
+static DEFINE_PER_CPU(spinlock_t, ipi_lock);
 
 enum ipi_message_type {
         IPI_NOP=0,
@@ -438,6 +436,11 @@ void __init smp_prepare_boot_cpu(void)
 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+        int cpu;
+
+        for_each_possible_cpu(cpu)
+                spin_lock_init(&per_cpu(ipi_lock, cpu));
+
         init_cpu_present(cpumask_of(0));
 
         parisc_max_cpus = max_cpus;
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 76d23ec8dfaa..9779ece2b070 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -26,13 +26,7 @@
 #include <linux/shm.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
-#include <linux/nfs_fs.h>
 #include <linux/ncp_fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
 #include <linux/poll.h>
 #include <linux/personality.h>
 #include <linux/stat.h>
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index a36799e85693..d58eac1a8288 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,6 +13,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/kallsyms.h>
+#include <linux/sort.h>
 
 #include <asm/uaccess.h>
 #include <asm/assembly.h>
@@ -115,24 +116,18 @@ unwind_table_init(struct unwind_table *table, const char *name,
         }
 }
 
+static int cmp_unwind_table_entry(const void *a, const void *b)
+{
+        return ((const struct unwind_table_entry *)a)->region_start
+                - ((const struct unwind_table_entry *)b)->region_start;
+}
+
 static void
 unwind_table_sort(struct unwind_table_entry *start,
                   struct unwind_table_entry *finish)
 {
-        struct unwind_table_entry el, *p, *q;
-
-        for (p = start + 1; p < finish; ++p) {
-                if (p[0].region_start < p[-1].region_start) {
-                        el = *p;
-                        q = p;
-                        do {
-                                q[0] = q[-1];
-                                --q;
-                        } while (q > start &&
-                                 el.region_start < q[-1].region_start);
-                        *q = el;
-                }
-        }
+        sort(start, finish - start, sizeof(struct unwind_table_entry),
+                cmp_unwind_table_entry, NULL);
 }
 
 struct unwind_table *
@@ -417,3 +412,30 @@ int unwind_to_user(struct unwind_frame_info *info)
 
         return ret;
 }
+
+unsigned long return_address(unsigned int level)
+{
+        struct unwind_frame_info info;
+        struct pt_regs r;
+        unsigned long sp;
+
+        /* initialize unwind info */
+        asm volatile ("copy %%r30, %0" : "=r"(sp));
+        memset(&r, 0, sizeof(struct pt_regs));
+        r.iaoq[0] = (unsigned long) current_text_addr();
+        r.gr[2] = (unsigned long) __builtin_return_address(0);
+        r.gr[30] = sp;
+        unwind_frame_init(&info, current, &r);
+
+        /* unwind stack */
+        ++level;
+        do {
+                if (unwind_once(&info) < 0 || info.ip == 0)
+                        return 0;
+                if (!__kernel_text_address(info.ip)) {
+                        return 0;
+                }
+        } while (info.ip && level--);
+
+        return info.ip;
+}
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index e3eb739fab19..353963d42059 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -12,8 +12,8 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-        [0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+        [0 ... (ATOMIC_HASH_SIZE-1)]  = __ARCH_SPIN_LOCK_UNLOCKED
 };
 #endif
 