author | Tejun Heo <tj@kernel.org> | 2010-01-04 19:17:33 -0500
---|---|---
committer | Tejun Heo <tj@kernel.org> | 2010-01-04 19:17:33 -0500
commit | 32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch) |
tree | b1ce838a37044bb38dfc128e2116ca35630e629a /arch/parisc |
parent | 22b737f4c75197372d64afc6ed1bccd58c00e549 (diff) |
parent | c5974b835a909ff15c3b7e6cf6789b5eb919f419 (diff) |
Merge branch 'master' into percpu
Conflicts:
arch/powerpc/platforms/pseries/hvCall.S
include/linux/percpu.h
Diffstat (limited to 'arch/parisc')
33 files changed, 250 insertions, 273 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index f388dc68f605..524d9352f17e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -18,6 +18,7 @@ config PARISC | |||
18 | select BUG | 18 | select BUG |
19 | select HAVE_PERF_EVENTS | 19 | select HAVE_PERF_EVENTS |
20 | select GENERIC_ATOMIC64 if !64BIT | 20 | select GENERIC_ATOMIC64 if !64BIT |
21 | select HAVE_ARCH_TRACEHOOK | ||
21 | help | 22 | help |
22 | The PA-RISC microprocessor is designed by Hewlett-Packard and used | 23 | The PA-RISC microprocessor is designed by Hewlett-Packard and used |
23 | in many of their workstations & servers (HP9000 700 and 800 series, | 24 | in many of their workstations & servers (HP9000 700 and 800 series, |
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c index 18072e03a019..92343bd35fa3 100644 --- a/arch/parisc/hpux/sys_hpux.c +++ b/arch/parisc/hpux/sys_hpux.c | |||
@@ -445,12 +445,7 @@ done: | |||
445 | 445 | ||
446 | int hpux_pipe(int *kstack_fildes) | 446 | int hpux_pipe(int *kstack_fildes) |
447 | { | 447 | { |
448 | int error; | 448 | return do_pipe_flags(kstack_fildes, 0); |
449 | |||
450 | lock_kernel(); | ||
451 | error = do_pipe_flags(kstack_fildes, 0); | ||
452 | unlock_kernel(); | ||
453 | return error; | ||
454 | } | 449 | } |
455 | 450 | ||
456 | /* lies - says it works, but it really didn't lock anything */ | 451 | /* lies - says it works, but it really didn't lock anything */ |
diff --git a/arch/parisc/include/asm/asm-offsets.h b/arch/parisc/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/parisc/include/asm/asm-offsets.h | |||
@@ -0,0 +1 @@ | |||
#include <generated/asm-offsets.h> | |||
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 8bc9e96699b2..716634d1f546 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h | |||
@@ -27,19 +27,19 @@ | |||
27 | # define ATOMIC_HASH_SIZE 4 | 27 | # define ATOMIC_HASH_SIZE 4 |
28 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | 28 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) |
29 | 29 | ||
30 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | 30 | extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; |
31 | 31 | ||
32 | /* Can't use raw_spin_lock_irq because of #include problems, so | 32 | /* Can't use raw_spin_lock_irq because of #include problems, so |
33 | * this is the substitute */ | 33 | * this is the substitute */ |
34 | #define _atomic_spin_lock_irqsave(l,f) do { \ | 34 | #define _atomic_spin_lock_irqsave(l,f) do { \ |
35 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | 35 | arch_spinlock_t *s = ATOMIC_HASH(l); \ |
36 | local_irq_save(f); \ | 36 | local_irq_save(f); \ |
37 | __raw_spin_lock(s); \ | 37 | arch_spin_lock(s); \ |
38 | } while(0) | 38 | } while(0) |
39 | 39 | ||
40 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | 40 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ |
41 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | 41 | arch_spinlock_t *s = ATOMIC_HASH(l); \ |
42 | __raw_spin_unlock(s); \ | 42 | arch_spin_unlock(s); \ |
43 | local_irq_restore(f); \ | 43 | local_irq_restore(f); \ |
44 | } while(0) | 44 | } while(0) |
45 | 45 | ||
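The atomic.h hunk above keeps PA-RISC's scheme for emulating atomic operations on hardware whose only atomic primitive is load-and-clear: the target address is hashed onto a small array of spinlocks, and the read-modify-write runs under that lock with local interrupts disabled. Only the lock type changes here (raw_spinlock_t becomes arch_spinlock_t). A stand-alone user-space sketch of the hashing idea, with pthread spinlocks standing in for the arch lock and all names illustrative:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define ATOMIC_HASH_SIZE 4
#define L1_CACHE_BYTES   64	/* illustrative; the kernel uses the real line size */

static pthread_spinlock_t atomic_hash[ATOMIC_HASH_SIZE];

/* Pick a lock from the variable's address, as ATOMIC_HASH() does above. */
static pthread_spinlock_t *hash_lock(volatile void *addr)
{
	uintptr_t a = (uintptr_t)addr;

	return &atomic_hash[(a / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)];
}

/* Emulated atomic add: serialize on the hashed lock, then do a plain RMW. */
static long emulated_atomic_add_return(long i, volatile long *v)
{
	pthread_spinlock_t *s = hash_lock(v);
	long ret;

	pthread_spin_lock(s);		/* the kernel also disables local IRQs here */
	ret = (*v += i);
	pthread_spin_unlock(s);
	return ret;
}

int main(void)
{
	volatile long counter = 0;

	for (int i = 0; i < ATOMIC_HASH_SIZE; i++)
		pthread_spin_init(&atomic_hash[i], PTHREAD_PROCESS_PRIVATE);

	printf("%ld\n", emulated_atomic_add_return(3, &counter));	/* 3 */
	return 0;
}
```

Build with `gcc -pthread`; in the kernel the same hashed locks also back the bitops fallback seen in arch/parisc/lib/bitops.c further down.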
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h index 8cfc553fc837..75e46c557a16 100644 --- a/arch/parisc/include/asm/bug.h +++ b/arch/parisc/include/asm/bug.h | |||
@@ -32,14 +32,14 @@ | |||
32 | "\t.popsection" \ | 32 | "\t.popsection" \ |
33 | : : "i" (__FILE__), "i" (__LINE__), \ | 33 | : : "i" (__FILE__), "i" (__LINE__), \ |
34 | "i" (0), "i" (sizeof(struct bug_entry)) ); \ | 34 | "i" (0), "i" (sizeof(struct bug_entry)) ); \ |
35 | for(;;) ; \ | 35 | unreachable(); \ |
36 | } while(0) | 36 | } while(0) |
37 | 37 | ||
38 | #else | 38 | #else |
39 | #define BUG() \ | 39 | #define BUG() \ |
40 | do { \ | 40 | do { \ |
41 | asm volatile(PARISC_BUG_BREAK_ASM : : ); \ | 41 | asm volatile(PARISC_BUG_BREAK_ASM : : ); \ |
42 | for(;;) ; \ | 42 | unreachable(); \ |
43 | } while(0) | 43 | } while(0) |
44 | #endif | 44 | #endif |
45 | 45 | ||
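In bug.h, the trailing `for(;;) ;` after the trap becomes unreachable(), which on compilers that support it expands to __builtin_unreachable(), telling the optimizer control never flows past the break instruction so the filler loop is unnecessary. A minimal stand-alone illustration of the pattern, with hypothetical names:

```c
#include <stdlib.h>

#define my_unreachable()  __builtin_unreachable()

__attribute__((noreturn))
static void my_bug(void)
{
	abort();		/* stands in for the PARISC BUG break instruction */
	my_unreachable();	/* optimizer may drop everything past this point */
}

int main(void)
{
	if (0)
		my_bug();
	return 0;
}
```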
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 724395143f26..7a73b615c23d 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h | |||
@@ -42,6 +42,7 @@ void flush_cache_mm(struct mm_struct *mm); | |||
42 | #define flush_cache_vmap(start, end) flush_cache_all() | 42 | #define flush_cache_vmap(start, end) flush_cache_all() |
43 | #define flush_cache_vunmap(start, end) flush_cache_all() | 43 | #define flush_cache_vunmap(start, end) flush_cache_all() |
44 | 44 | ||
45 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 | ||
45 | extern void flush_dcache_page(struct page *page); | 46 | extern void flush_dcache_page(struct page *page); |
46 | 47 | ||
47 | #define flush_dcache_mmap_lock(mapping) \ | 48 | #define flush_dcache_mmap_lock(mapping) \ |
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 9c802eb4be84..19f6cb1a4a1c 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h | |||
@@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */ | |||
328 | such function. */ | 328 | such function. */ |
329 | #define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0 | 329 | #define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0 |
330 | 330 | ||
331 | #define USE_ELF_CORE_DUMP | ||
332 | #define ELF_EXEC_PAGESIZE 4096 | 331 | #define ELF_EXEC_PAGESIZE 4096 |
333 | 332 | ||
334 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | 333 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical |
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h index 5f39d5597ced..f357fc693c89 100644 --- a/arch/parisc/include/asm/fcntl.h +++ b/arch/parisc/include/asm/fcntl.h | |||
@@ -1,14 +1,13 @@ | |||
1 | #ifndef _PARISC_FCNTL_H | 1 | #ifndef _PARISC_FCNTL_H |
2 | #define _PARISC_FCNTL_H | 2 | #define _PARISC_FCNTL_H |
3 | 3 | ||
4 | /* open/fcntl - O_SYNC is only implemented on blocks devices and on files | ||
5 | located on an ext2 file system */ | ||
6 | #define O_APPEND 000000010 | 4 | #define O_APPEND 000000010 |
7 | #define O_BLKSEEK 000000100 /* HPUX only */ | 5 | #define O_BLKSEEK 000000100 /* HPUX only */ |
8 | #define O_CREAT 000000400 /* not fcntl */ | 6 | #define O_CREAT 000000400 /* not fcntl */ |
9 | #define O_EXCL 000002000 /* not fcntl */ | 7 | #define O_EXCL 000002000 /* not fcntl */ |
10 | #define O_LARGEFILE 000004000 | 8 | #define O_LARGEFILE 000004000 |
11 | #define O_SYNC 000100000 | 9 | #define __O_SYNC 000100000 |
10 | #define O_SYNC (__O_SYNC|O_DSYNC) | ||
12 | #define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */ | 11 | #define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */ |
13 | #define O_NOCTTY 000400000 /* not fcntl */ | 12 | #define O_NOCTTY 000400000 /* not fcntl */ |
14 | #define O_DSYNC 001000000 /* HPUX only */ | 13 | #define O_DSYNC 001000000 /* HPUX only */ |
@@ -28,8 +27,6 @@ | |||
28 | #define F_SETOWN 12 /* for sockets. */ | 27 | #define F_SETOWN 12 /* for sockets. */ |
29 | #define F_SETSIG 13 /* for sockets. */ | 28 | #define F_SETSIG 13 /* for sockets. */ |
30 | #define F_GETSIG 14 /* for sockets. */ | 29 | #define F_GETSIG 14 /* for sockets. */ |
31 | #define F_GETOWN_EX 15 | ||
32 | #define F_SETOWN_EX 16 | ||
33 | 30 | ||
34 | /* for posix fcntl() and lockf() */ | 31 | /* for posix fcntl() and lockf() */ |
35 | #define F_RDLCK 01 | 32 | #define F_RDLCK 01 |
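The fcntl.h hunk adopts the kernel-wide O_SYNC layout in which O_SYNC is a superset of O_DSYNC: __O_SYNC carries the extra file-integrity bit and O_SYNC is defined as (__O_SYNC | O_DSYNC). The sync variants can then be told apart with plain mask tests; a small stand-alone check using the parisc values from the hunk (the helper is illustrative, not kernel code):

```c
#include <stdio.h>

#define __O_SYNC 000100000
#define O_DSYNC  001000000
#define O_SYNC   (__O_SYNC | O_DSYNC)

static const char *sync_kind(int flags)
{
	if ((flags & O_SYNC) == O_SYNC)
		return "full file integrity (O_SYNC)";
	if (flags & O_DSYNC)
		return "data integrity only (O_DSYNC)";
	return "no sync flag";
}

int main(void)
{
	printf("%s\n", sync_kind(O_DSYNC));	/* data integrity only */
	printf("%s\n", sync_kind(O_SYNC));	/* full file integrity */
	return 0;
}
```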
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index de3fe3a18229..6fec4d4a1a18 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h | |||
@@ -21,9 +21,9 @@ | |||
21 | #define KERNEL_MAP_END (TMPALIAS_MAP_START) | 21 | #define KERNEL_MAP_END (TMPALIAS_MAP_START) |
22 | 22 | ||
23 | #ifndef __ASSEMBLY__ | 23 | #ifndef __ASSEMBLY__ |
24 | extern void *vmalloc_start; | 24 | extern void *parisc_vmalloc_start; |
25 | #define PCXL_DMA_MAP_SIZE (8*1024*1024) | 25 | #define PCXL_DMA_MAP_SIZE (8*1024*1024) |
26 | #define VMALLOC_START ((unsigned long)vmalloc_start) | 26 | #define VMALLOC_START ((unsigned long)parisc_vmalloc_start) |
27 | #define VMALLOC_END (KERNEL_MAP_END) | 27 | #define VMALLOC_END (KERNEL_MAP_END) |
28 | #endif /*__ASSEMBLY__*/ | 28 | #endif /*__ASSEMBLY__*/ |
29 | 29 | ||
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h index 2fa05dd6aeee..72c0fafaa039 100644 --- a/arch/parisc/include/asm/ftrace.h +++ b/arch/parisc/include/asm/ftrace.h | |||
@@ -20,6 +20,20 @@ struct ftrace_ret_stack { | |||
20 | * Defined in entry.S | 20 | * Defined in entry.S |
21 | */ | 21 | */ |
22 | extern void return_to_handler(void); | 22 | extern void return_to_handler(void); |
23 | |||
24 | |||
25 | extern unsigned long return_address(unsigned int); | ||
26 | |||
27 | #define HAVE_ARCH_CALLER_ADDR | ||
28 | |||
29 | #define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) | ||
30 | #define CALLER_ADDR1 return_address(1) | ||
31 | #define CALLER_ADDR2 return_address(2) | ||
32 | #define CALLER_ADDR3 return_address(3) | ||
33 | #define CALLER_ADDR4 return_address(4) | ||
34 | #define CALLER_ADDR5 return_address(5) | ||
35 | #define CALLER_ADDR6 return_address(6) | ||
36 | |||
23 | #endif /* __ASSEMBLY__ */ | 37 | #endif /* __ASSEMBLY__ */ |
24 | 38 | ||
25 | #endif /* _ASM_PARISC_FTRACE_H */ | 39 | #endif /* _ASM_PARISC_FTRACE_H */ |
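The new CALLER_ADDRn macros route every level above zero through return_address(), implemented with the parisc stack unwinder (see the unwind.c hunk near the end of this diff), since __builtin_return_address(n) is generally only dependable for n == 0. A portable sketch of the level-0 case only (names illustrative, not kernel code):

```c
#include <stdio.h>

__attribute__((noinline))
static void *level0_caller(void)
{
	/* the CALLER_ADDR0 case: the builtin is reliable at level 0 */
	return __builtin_return_address(0);
}

int main(void)
{
	printf("called from near %p\n", level0_caller());
	return 0;
}
```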
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index ce93133d5112..0d68184a76cb 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h | |||
@@ -1,29 +1,11 @@ | |||
1 | /* hardirq.h: PA-RISC hard IRQ support. | 1 | /* hardirq.h: PA-RISC hard IRQ support. |
2 | * | 2 | * |
3 | * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> | 3 | * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> |
4 | * | ||
5 | * The locking is really quite interesting. There's a cpu-local | ||
6 | * count of how many interrupts are being handled, and a global | ||
7 | * lock. An interrupt can only be serviced if the global lock | ||
8 | * is free. You can't be sure no more interrupts are being | ||
9 | * serviced until you've acquired the lock and then checked | ||
10 | * all the per-cpu interrupt counts are all zero. It's a specialised | ||
11 | * br_lock, and that's exactly how Sparc does it. We don't because | ||
12 | * it's more locking for us. This way is lock-free in the interrupt path. | ||
13 | */ | 4 | */ |
14 | 5 | ||
15 | #ifndef _PARISC_HARDIRQ_H | 6 | #ifndef _PARISC_HARDIRQ_H |
16 | #define _PARISC_HARDIRQ_H | 7 | #define _PARISC_HARDIRQ_H |
17 | 8 | ||
18 | #include <linux/threads.h> | 9 | #include <asm-generic/hardirq.h> |
19 | #include <linux/irq.h> | ||
20 | |||
21 | typedef struct { | ||
22 | unsigned long __softirq_pending; /* set_bit is used on this */ | ||
23 | } ____cacheline_aligned irq_cpustat_t; | ||
24 | |||
25 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
26 | |||
27 | void ack_bad_irq(unsigned int irq); | ||
28 | 10 | ||
29 | #endif /* _PARISC_HARDIRQ_H */ | 11 | #endif /* _PARISC_HARDIRQ_H */ |
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 302f68dc889c..aead40b16dd8 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h | |||
@@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task); | |||
59 | #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) | 59 | #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) |
60 | #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) | 60 | #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) |
61 | #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) | 61 | #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) |
62 | #define user_stack_pointer(regs) ((regs)->gr[30]) | ||
62 | unsigned long profile_pc(struct pt_regs *); | 63 | unsigned long profile_pc(struct pt_regs *); |
63 | extern void show_regs(struct pt_regs *); | 64 | extern void show_regs(struct pt_regs *); |
64 | #endif | 65 | |
66 | |||
67 | #endif /* __KERNEL__ */ | ||
65 | 68 | ||
66 | #endif | 69 | #endif |
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index 960b1e5d8e16..225b7d6a1a0a 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h | |||
@@ -59,6 +59,8 @@ | |||
59 | #define SO_TIMESTAMPING 0x4020 | 59 | #define SO_TIMESTAMPING 0x4020 |
60 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 60 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
61 | 61 | ||
62 | #define SO_RXQ_OVFL 0x4021 | ||
63 | |||
62 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we | 64 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we |
63 | * have to define SOCK_NONBLOCK to a different value here. | 65 | * have to define SOCK_NONBLOCK to a different value here. |
64 | */ | 66 | */ |
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index fae03e136fa8..74036f436a3b 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h | |||
@@ -5,17 +5,17 @@ | |||
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | #include <asm/spinlock_types.h> | 6 | #include <asm/spinlock_types.h> |
7 | 7 | ||
8 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) | 8 | static inline int arch_spin_is_locked(arch_spinlock_t *x) |
9 | { | 9 | { |
10 | volatile unsigned int *a = __ldcw_align(x); | 10 | volatile unsigned int *a = __ldcw_align(x); |
11 | return *a == 0; | 11 | return *a == 0; |
12 | } | 12 | } |
13 | 13 | ||
14 | #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) | 14 | #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) |
15 | #define __raw_spin_unlock_wait(x) \ | 15 | #define arch_spin_unlock_wait(x) \ |
16 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | 16 | do { cpu_relax(); } while (arch_spin_is_locked(x)) |
17 | 17 | ||
18 | static inline void __raw_spin_lock_flags(raw_spinlock_t *x, | 18 | static inline void arch_spin_lock_flags(arch_spinlock_t *x, |
19 | unsigned long flags) | 19 | unsigned long flags) |
20 | { | 20 | { |
21 | volatile unsigned int *a; | 21 | volatile unsigned int *a; |
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x, | |||
33 | mb(); | 33 | mb(); |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void __raw_spin_unlock(raw_spinlock_t *x) | 36 | static inline void arch_spin_unlock(arch_spinlock_t *x) |
37 | { | 37 | { |
38 | volatile unsigned int *a; | 38 | volatile unsigned int *a; |
39 | mb(); | 39 | mb(); |
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x) | |||
42 | mb(); | 42 | mb(); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline int __raw_spin_trylock(raw_spinlock_t *x) | 45 | static inline int arch_spin_trylock(arch_spinlock_t *x) |
46 | { | 46 | { |
47 | volatile unsigned int *a; | 47 | volatile unsigned int *a; |
48 | int ret; | 48 | int ret; |
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x) | |||
69 | 69 | ||
70 | /* Note that we have to ensure interrupts are disabled in case we're | 70 | /* Note that we have to ensure interrupts are disabled in case we're |
71 | * interrupted by some other code that wants to grab the same read lock */ | 71 | * interrupted by some other code that wants to grab the same read lock */ |
72 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) | 72 | static __inline__ void arch_read_lock(arch_rwlock_t *rw) |
73 | { | 73 | { |
74 | unsigned long flags; | 74 | unsigned long flags; |
75 | local_irq_save(flags); | 75 | local_irq_save(flags); |
76 | __raw_spin_lock_flags(&rw->lock, flags); | 76 | arch_spin_lock_flags(&rw->lock, flags); |
77 | rw->counter++; | 77 | rw->counter++; |
78 | __raw_spin_unlock(&rw->lock); | 78 | arch_spin_unlock(&rw->lock); |
79 | local_irq_restore(flags); | 79 | local_irq_restore(flags); |
80 | } | 80 | } |
81 | 81 | ||
82 | /* Note that we have to ensure interrupts are disabled in case we're | 82 | /* Note that we have to ensure interrupts are disabled in case we're |
83 | * interrupted by some other code that wants to grab the same read lock */ | 83 | * interrupted by some other code that wants to grab the same read lock */ |
84 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) | 84 | static __inline__ void arch_read_unlock(arch_rwlock_t *rw) |
85 | { | 85 | { |
86 | unsigned long flags; | 86 | unsigned long flags; |
87 | local_irq_save(flags); | 87 | local_irq_save(flags); |
88 | __raw_spin_lock_flags(&rw->lock, flags); | 88 | arch_spin_lock_flags(&rw->lock, flags); |
89 | rw->counter--; | 89 | rw->counter--; |
90 | __raw_spin_unlock(&rw->lock); | 90 | arch_spin_unlock(&rw->lock); |
91 | local_irq_restore(flags); | 91 | local_irq_restore(flags); |
92 | } | 92 | } |
93 | 93 | ||
94 | /* Note that we have to ensure interrupts are disabled in case we're | 94 | /* Note that we have to ensure interrupts are disabled in case we're |
95 | * interrupted by some other code that wants to grab the same read lock */ | 95 | * interrupted by some other code that wants to grab the same read lock */ |
96 | static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) | 96 | static __inline__ int arch_read_trylock(arch_rwlock_t *rw) |
97 | { | 97 | { |
98 | unsigned long flags; | 98 | unsigned long flags; |
99 | retry: | 99 | retry: |
100 | local_irq_save(flags); | 100 | local_irq_save(flags); |
101 | if (__raw_spin_trylock(&rw->lock)) { | 101 | if (arch_spin_trylock(&rw->lock)) { |
102 | rw->counter++; | 102 | rw->counter++; |
103 | __raw_spin_unlock(&rw->lock); | 103 | arch_spin_unlock(&rw->lock); |
104 | local_irq_restore(flags); | 104 | local_irq_restore(flags); |
105 | return 1; | 105 | return 1; |
106 | } | 106 | } |
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) | |||
111 | return 0; | 111 | return 0; |
112 | 112 | ||
113 | /* Wait until we have a realistic chance at the lock */ | 113 | /* Wait until we have a realistic chance at the lock */ |
114 | while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) | 114 | while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) |
115 | cpu_relax(); | 115 | cpu_relax(); |
116 | 116 | ||
117 | goto retry; | 117 | goto retry; |
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) | |||
119 | 119 | ||
120 | /* Note that we have to ensure interrupts are disabled in case we're | 120 | /* Note that we have to ensure interrupts are disabled in case we're |
121 | * interrupted by some other code that wants to read_trylock() this lock */ | 121 | * interrupted by some other code that wants to read_trylock() this lock */ |
122 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) | 122 | static __inline__ void arch_write_lock(arch_rwlock_t *rw) |
123 | { | 123 | { |
124 | unsigned long flags; | 124 | unsigned long flags; |
125 | retry: | 125 | retry: |
126 | local_irq_save(flags); | 126 | local_irq_save(flags); |
127 | __raw_spin_lock_flags(&rw->lock, flags); | 127 | arch_spin_lock_flags(&rw->lock, flags); |
128 | 128 | ||
129 | if (rw->counter != 0) { | 129 | if (rw->counter != 0) { |
130 | __raw_spin_unlock(&rw->lock); | 130 | arch_spin_unlock(&rw->lock); |
131 | local_irq_restore(flags); | 131 | local_irq_restore(flags); |
132 | 132 | ||
133 | while (rw->counter != 0) | 133 | while (rw->counter != 0) |
@@ -141,27 +141,27 @@ retry: | |||
141 | local_irq_restore(flags); | 141 | local_irq_restore(flags); |
142 | } | 142 | } |
143 | 143 | ||
144 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | 144 | static __inline__ void arch_write_unlock(arch_rwlock_t *rw) |
145 | { | 145 | { |
146 | rw->counter = 0; | 146 | rw->counter = 0; |
147 | __raw_spin_unlock(&rw->lock); | 147 | arch_spin_unlock(&rw->lock); |
148 | } | 148 | } |
149 | 149 | ||
150 | /* Note that we have to ensure interrupts are disabled in case we're | 150 | /* Note that we have to ensure interrupts are disabled in case we're |
151 | * interrupted by some other code that wants to read_trylock() this lock */ | 151 | * interrupted by some other code that wants to read_trylock() this lock */ |
152 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) | 152 | static __inline__ int arch_write_trylock(arch_rwlock_t *rw) |
153 | { | 153 | { |
154 | unsigned long flags; | 154 | unsigned long flags; |
155 | int result = 0; | 155 | int result = 0; |
156 | 156 | ||
157 | local_irq_save(flags); | 157 | local_irq_save(flags); |
158 | if (__raw_spin_trylock(&rw->lock)) { | 158 | if (arch_spin_trylock(&rw->lock)) { |
159 | if (rw->counter == 0) { | 159 | if (rw->counter == 0) { |
160 | rw->counter = -1; | 160 | rw->counter = -1; |
161 | result = 1; | 161 | result = 1; |
162 | } else { | 162 | } else { |
163 | /* Read-locked. Oh well. */ | 163 | /* Read-locked. Oh well. */ |
164 | __raw_spin_unlock(&rw->lock); | 164 | arch_spin_unlock(&rw->lock); |
165 | } | 165 | } |
166 | } | 166 | } |
167 | local_irq_restore(flags); | 167 | local_irq_restore(flags); |
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) | |||
173 | * read_can_lock - would read_trylock() succeed? | 173 | * read_can_lock - would read_trylock() succeed? |
174 | * @lock: the rwlock in question. | 174 | * @lock: the rwlock in question. |
175 | */ | 175 | */ |
176 | static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) | 176 | static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) |
177 | { | 177 | { |
178 | return rw->counter >= 0; | 178 | return rw->counter >= 0; |
179 | } | 179 | } |
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) | |||
182 | * write_can_lock - would write_trylock() succeed? | 182 | * write_can_lock - would write_trylock() succeed? |
183 | * @lock: the rwlock in question. | 183 | * @lock: the rwlock in question. |
184 | */ | 184 | */ |
185 | static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) | 185 | static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) |
186 | { | 186 | { |
187 | return !rw->counter; | 187 | return !rw->counter; |
188 | } | 188 | } |
189 | 189 | ||
190 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 190 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
191 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 191 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
192 | 192 | ||
193 | #define _raw_spin_relax(lock) cpu_relax() | 193 | #define arch_spin_relax(lock) cpu_relax() |
194 | #define _raw_read_relax(lock) cpu_relax() | 194 | #define arch_read_relax(lock) cpu_relax() |
195 | #define _raw_write_relax(lock) cpu_relax() | 195 | #define arch_write_relax(lock) cpu_relax() |
196 | 196 | ||
197 | #endif /* __ASM_SPINLOCK_H */ | 197 | #endif /* __ASM_SPINLOCK_H */ |
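The rwlock above is a counter guarded by an arch spinlock: readers take the lock briefly to bump the counter, while a writer sets the counter to -1 and keeps the underlying lock held until write-unlock. A compact user-space analogue of that structure, using pthread spinlocks and omitting the interrupt masking (all names illustrative):

```c
#include <pthread.h>

struct toy_rwlock {
	pthread_spinlock_t lock;
	volatile int counter;	/* > 0: readers, 0: free, -1: writer */
};

static void toy_read_lock(struct toy_rwlock *rw)
{
	pthread_spin_lock(&rw->lock);
	rw->counter++;
	pthread_spin_unlock(&rw->lock);
}

static void toy_read_unlock(struct toy_rwlock *rw)
{
	pthread_spin_lock(&rw->lock);
	rw->counter--;
	pthread_spin_unlock(&rw->lock);
}

/* The writer keeps the low-level lock held across its critical section,
 * mirroring arch_write_trylock()/arch_write_unlock() above. */
static int toy_write_trylock(struct toy_rwlock *rw)
{
	pthread_spin_lock(&rw->lock);
	if (rw->counter == 0) {
		rw->counter = -1;
		return 1;		/* rw->lock stays held */
	}
	pthread_spin_unlock(&rw->lock);
	return 0;
}

static void toy_write_unlock(struct toy_rwlock *rw)
{
	rw->counter = 0;
	pthread_spin_unlock(&rw->lock);
}
```

A real program would initialise rw->lock with pthread_spin_init() and likely prefer pthread_rwlock_t; the sketch only mirrors the counter-plus-lock layout of arch_rwlock_t.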
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 3f72f47cf4b2..8c373aa28a86 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h | |||
@@ -4,18 +4,18 @@ | |||
4 | typedef struct { | 4 | typedef struct { |
5 | #ifdef CONFIG_PA20 | 5 | #ifdef CONFIG_PA20 |
6 | volatile unsigned int slock; | 6 | volatile unsigned int slock; |
7 | # define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 7 | # define __ARCH_SPIN_LOCK_UNLOCKED { 1 } |
8 | #else | 8 | #else |
9 | volatile unsigned int lock[4]; | 9 | volatile unsigned int lock[4]; |
10 | # define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | 10 | # define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } |
11 | #endif | 11 | #endif |
12 | } raw_spinlock_t; | 12 | } arch_spinlock_t; |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | raw_spinlock_t lock; | 15 | arch_spinlock_t lock; |
16 | volatile int counter; | 16 | volatile int counter; |
17 | } raw_rwlock_t; | 17 | } arch_rwlock_t; |
18 | 18 | ||
19 | #define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } | 19 | #define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } |
20 | 20 | ||
21 | #endif | 21 | #endif |
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h new file mode 100644 index 000000000000..8bdfd2c8c39f --- /dev/null +++ b/arch/parisc/include/asm/syscall.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* syscall.h */ | ||
2 | |||
3 | #ifndef _ASM_PARISC_SYSCALL_H_ | ||
4 | #define _ASM_PARISC_SYSCALL_H_ | ||
5 | |||
6 | #include <linux/err.h> | ||
7 | #include <asm/ptrace.h> | ||
8 | |||
9 | static inline long syscall_get_nr(struct task_struct *tsk, | ||
10 | struct pt_regs *regs) | ||
11 | { | ||
12 | return regs->gr[20]; | ||
13 | } | ||
14 | |||
15 | static inline void syscall_get_arguments(struct task_struct *tsk, | ||
16 | struct pt_regs *regs, unsigned int i, | ||
17 | unsigned int n, unsigned long *args) | ||
18 | { | ||
19 | BUG_ON(i); | ||
20 | |||
21 | switch (n) { | ||
22 | case 6: | ||
23 | args[5] = regs->gr[21]; | ||
24 | case 5: | ||
25 | args[4] = regs->gr[22]; | ||
26 | case 4: | ||
27 | args[3] = regs->gr[23]; | ||
28 | case 3: | ||
29 | args[2] = regs->gr[24]; | ||
30 | case 2: | ||
31 | args[1] = regs->gr[25]; | ||
32 | case 1: | ||
33 | args[0] = regs->gr[26]; | ||
34 | break; | ||
35 | default: | ||
36 | BUG(); | ||
37 | } | ||
38 | } | ||
39 | |||
40 | #endif /*_ASM_PARISC_SYSCALL_H_*/ | ||
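syscall_get_arguments() above relies on deliberate switch fall-through: entering at case n falls through every lower case, so exactly the first n syscall arguments are copied out of the parisc argument registers (gr[26] holds the first). A stand-alone analogue of that copy, with a made-up register struct:

```c
#include <stdio.h>

struct fake_regs { unsigned long gr[32]; };

static void get_args(const struct fake_regs *regs, unsigned int n,
		     unsigned long *args)
{
	switch (n) {
	case 6: args[5] = regs->gr[21];	/* fall through */
	case 5: args[4] = regs->gr[22];	/* fall through */
	case 4: args[3] = regs->gr[23];	/* fall through */
	case 3: args[2] = regs->gr[24];	/* fall through */
	case 2: args[1] = regs->gr[25];	/* fall through */
	case 1: args[0] = regs->gr[26];
		break;
	default:
		break;
	}
}

int main(void)
{
	struct fake_regs r = { .gr = { [26] = 11, [25] = 22, [24] = 33 } };
	unsigned long a[6] = { 0 };

	get_args(&r, 3, a);
	printf("%lu %lu %lu\n", a[0], a[1], a[2]);	/* 11 22 33 */
	return 0;
}
```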
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index ac775a76bff7..7ecc1039cfed 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
@@ -32,6 +32,11 @@ struct thread_info { | |||
32 | #define init_thread_info (init_thread_union.thread_info) | 32 | #define init_thread_info (init_thread_union.thread_info) |
33 | #define init_stack (init_thread_union.stack) | 33 | #define init_stack (init_thread_union.stack) |
34 | 34 | ||
35 | /* how to get the thread information struct from C */ | ||
36 | #define current_thread_info() ((struct thread_info *)mfctl(30)) | ||
37 | |||
38 | #endif /* !__ASSEMBLY */ | ||
39 | |||
35 | /* thread information allocation */ | 40 | /* thread information allocation */ |
36 | 41 | ||
37 | #define THREAD_SIZE_ORDER 2 | 42 | #define THREAD_SIZE_ORDER 2 |
@@ -40,11 +45,6 @@ struct thread_info { | |||
40 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | 45 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) |
41 | #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) | 46 | #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) |
42 | 47 | ||
43 | /* how to get the thread information struct from C */ | ||
44 | #define current_thread_info() ((struct thread_info *)mfctl(30)) | ||
45 | |||
46 | #endif /* !__ASSEMBLY */ | ||
47 | |||
48 | #define PREEMPT_ACTIVE_BIT 28 | 48 | #define PREEMPT_ACTIVE_BIT 28 |
49 | #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) | 49 | #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) |
50 | 50 | ||
@@ -60,6 +60,8 @@ struct thread_info { | |||
60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ | 60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ |
61 | #define TIF_FREEZE 7 /* is freezing for suspend */ | 61 | #define TIF_FREEZE 7 /* is freezing for suspend */ |
62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ | 62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ |
63 | #define TIF_SINGLESTEP 9 /* single stepping? */ | ||
64 | #define TIF_BLOCKSTEP 10 /* branch stepping? */ | ||
63 | 65 | ||
64 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 66 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
65 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 67 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
@@ -69,6 +71,8 @@ struct thread_info { | |||
69 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | 71 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
70 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 72 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
71 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 73 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
74 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | ||
75 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) | ||
72 | 76 | ||
73 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ | 77 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ |
74 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | 78 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) |
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 699cf8ef2118..ec787b411e9a 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c | |||
@@ -244,9 +244,6 @@ int main(void) | |||
244 | DEFINE(THREAD_SZ, sizeof(struct thread_info)); | 244 | DEFINE(THREAD_SZ, sizeof(struct thread_info)); |
245 | DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64)); | 245 | DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64)); |
246 | BLANK(); | 246 | BLANK(); |
247 | DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending)); | ||
248 | DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t)); | ||
249 | BLANK(); | ||
250 | DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base)); | 247 | DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base)); |
251 | DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride)); | 248 | DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride)); |
252 | DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count)); | 249 | DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count)); |
@@ -270,8 +267,8 @@ int main(void) | |||
270 | DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); | 267 | DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); |
271 | DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); | 268 | DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); |
272 | BLANK(); | 269 | BLANK(); |
273 | DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); | 270 | DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); |
274 | DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); | 271 | DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); |
275 | BLANK(); | 272 | BLANK(); |
276 | DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); | 273 | DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); |
277 | DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); | 274 | DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); |
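The asm-offsets.c change derives the assembler-visible bit numbers directly from the TIF_* values: parisc bit instructions such as extru and bb count bits from the most significant end, so a flag at conventional LSB-first position TIF_x sits at bit 31 - TIF_x for them. A quick numeric check of that correspondence, using the value from thread_info.h above:

```c
#include <stdio.h>

#define TIF_SINGLESTEP 9	/* from thread_info.h above */

int main(void)
{
	unsigned int flags = 1u << TIF_SINGLESTEP;
	int pa_bit = 31 - TIF_SINGLESTEP;	/* what extru,= tests in entry.S */

	/* bit pa_bit counted from the MSB is the very same bit */
	printf("%d\n", (flags >> (31 - pa_bit)) & 1);	/* prints 1 */
	return 0;
}
```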
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 8c4712b74dc1..3a44f7f704fa 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
@@ -2047,12 +2047,13 @@ syscall_do_signal: | |||
2047 | b,n syscall_check_sig | 2047 | b,n syscall_check_sig |
2048 | 2048 | ||
2049 | syscall_restore: | 2049 | syscall_restore: |
2050 | /* Are we being ptraced? */ | ||
2051 | LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 | 2050 | LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 |
2052 | 2051 | ||
2053 | ldw TASK_PTRACE(%r1), %r19 | 2052 | /* Are we being ptraced? */ |
2054 | bb,< %r19,31,syscall_restore_rfi | 2053 | ldw TASK_FLAGS(%r1),%r19 |
2055 | nop | 2054 | ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 |
2055 | and,COND(=) %r19,%r2,%r0 | ||
2056 | b,n syscall_restore_rfi | ||
2056 | 2057 | ||
2057 | ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ | 2058 | ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ |
2058 | rest_fp %r19 | 2059 | rest_fp %r19 |
@@ -2113,16 +2114,16 @@ syscall_restore_rfi: | |||
2113 | ldi 0x0b,%r20 /* Create new PSW */ | 2114 | ldi 0x0b,%r20 /* Create new PSW */ |
2114 | depi -1,13,1,%r20 /* C, Q, D, and I bits */ | 2115 | depi -1,13,1,%r20 /* C, Q, D, and I bits */ |
2115 | 2116 | ||
2116 | /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are | 2117 | /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are |
2117 | * set in include/linux/ptrace.h and converted to PA bitmap | 2118 | * set in thread_info.h and converted to PA bitmap |
2118 | * numbers in asm-offsets.c */ | 2119 | * numbers in asm-offsets.c */ |
2119 | 2120 | ||
2120 | /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ | 2121 | /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ |
2121 | extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 | 2122 | extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 |
2122 | depi -1,27,1,%r20 /* R bit */ | 2123 | depi -1,27,1,%r20 /* R bit */ |
2123 | 2124 | ||
2124 | /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ | 2125 | /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ |
2125 | extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 | 2126 | extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 |
2126 | depi -1,7,1,%r20 /* T bit */ | 2127 | depi -1,7,1,%r20 /* T bit */ |
2127 | 2128 | ||
2128 | STREG %r20,TASK_PT_PSW(%r1) | 2129 | STREG %r20,TASK_PT_PSW(%r1) |
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 330f536a9324..efbcee5d2220 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -145,7 +145,7 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) | |||
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | static struct irq_chip cpu_interrupt_type = { | 147 | static struct irq_chip cpu_interrupt_type = { |
148 | .typename = "CPU", | 148 | .name = "CPU", |
149 | .startup = cpu_startup_irq, | 149 | .startup = cpu_startup_irq, |
150 | .shutdown = cpu_disable_irq, | 150 | .shutdown = cpu_disable_irq, |
151 | .enable = cpu_enable_irq, | 151 | .enable = cpu_enable_irq, |
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
180 | if (i < NR_IRQS) { | 180 | if (i < NR_IRQS) { |
181 | struct irqaction *action; | 181 | struct irqaction *action; |
182 | 182 | ||
183 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 183 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
184 | action = irq_desc[i].action; | 184 | action = irq_desc[i].action; |
185 | if (!action) | 185 | if (!action) |
186 | goto skip; | 186 | goto skip; |
@@ -192,7 +192,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
192 | seq_printf(p, "%10u ", kstat_irqs(i)); | 192 | seq_printf(p, "%10u ", kstat_irqs(i)); |
193 | #endif | 193 | #endif |
194 | 194 | ||
195 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 195 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
196 | #ifndef PARISC_IRQ_CR16_COUNTS | 196 | #ifndef PARISC_IRQ_CR16_COUNTS |
197 | seq_printf(p, " %s", action->name); | 197 | seq_printf(p, " %s", action->name); |
198 | 198 | ||
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
224 | 224 | ||
225 | seq_putc(p, '\n'); | 225 | seq_putc(p, '\n'); |
226 | skip: | 226 | skip: |
227 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 227 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
228 | } | 228 | } |
229 | 229 | ||
230 | return 0; | 230 | return 0; |
@@ -423,8 +423,3 @@ void __init init_IRQ(void) | |||
423 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ | 423 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ |
424 | 424 | ||
425 | } | 425 | } |
426 | |||
427 | void ack_bad_irq(unsigned int irq) | ||
428 | { | ||
429 | printk(KERN_WARNING "unexpected IRQ %d\n", irq); | ||
430 | } | ||
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 61ee0eec4e69..212074653df7 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
@@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
893 | * ourselves */ | 893 | * ourselves */ |
894 | for (i = 1; i < hdr->e_shnum; i++) { | 894 | for (i = 1; i < hdr->e_shnum; i++) { |
895 | if(sechdrs[i].sh_type == SHT_SYMTAB | 895 | if(sechdrs[i].sh_type == SHT_SYMTAB |
896 | && (sechdrs[i].sh_type & SHF_ALLOC)) { | 896 | && (sechdrs[i].sh_flags & SHF_ALLOC)) { |
897 | int strindex = sechdrs[i].sh_link; | 897 | int strindex = sechdrs[i].sh_link; |
898 | /* FIXME: AWFUL HACK | 898 | /* FIXME: AWFUL HACK |
899 | * The cast is to drop the const from | 899 | * The cast is to drop the const from |
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index 75099efb3bf3..f9f6783e4bdd 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c | |||
@@ -24,7 +24,7 @@ | |||
24 | * | 24 | * |
25 | * This driver programs the PCX-U/PCX-W performance counters | 25 | * This driver programs the PCX-U/PCX-W performance counters |
26 | * on the PA-RISC 2.0 chips. The driver keeps all images now | 26 | * on the PA-RISC 2.0 chips. The driver keeps all images now |
27 | * internally to the kernel to hopefully eliminate the possiblity | 27 | * internally to the kernel to hopefully eliminate the possibility |
28 | * of a bad image halting the CPU. Also, there are different | 28 | * of a bad image halting the CPU. Also, there are different |
29 | * images for the PCX-W and later chips vs the PCX-U chips. | 29 | * images for the PCX-W and later chips vs the PCX-U chips. |
30 | * | 30 | * |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 927db3668b6f..c4f49e45129d 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
16 | #include <linux/tracehook.h> | ||
16 | #include <linux/user.h> | 17 | #include <linux/user.h> |
17 | #include <linux/personality.h> | 18 | #include <linux/personality.h> |
18 | #include <linux/security.h> | 19 | #include <linux/security.h> |
@@ -35,7 +36,8 @@ | |||
35 | */ | 36 | */ |
36 | void ptrace_disable(struct task_struct *task) | 37 | void ptrace_disable(struct task_struct *task) |
37 | { | 38 | { |
38 | task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); | 39 | clear_tsk_thread_flag(task, TIF_SINGLESTEP); |
40 | clear_tsk_thread_flag(task, TIF_BLOCKSTEP); | ||
39 | 41 | ||
40 | /* make sure the trap bits are not set */ | 42 | /* make sure the trap bits are not set */ |
41 | pa_psw(task)->r = 0; | 43 | pa_psw(task)->r = 0; |
@@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task) | |||
55 | 57 | ||
56 | void user_enable_single_step(struct task_struct *task) | 58 | void user_enable_single_step(struct task_struct *task) |
57 | { | 59 | { |
58 | task->ptrace &= ~PT_BLOCKSTEP; | 60 | clear_tsk_thread_flag(task, TIF_BLOCKSTEP); |
59 | task->ptrace |= PT_SINGLESTEP; | 61 | set_tsk_thread_flag(task, TIF_SINGLESTEP); |
60 | 62 | ||
61 | if (pa_psw(task)->n) { | 63 | if (pa_psw(task)->n) { |
62 | struct siginfo si; | 64 | struct siginfo si; |
@@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task) | |||
98 | 100 | ||
99 | void user_enable_block_step(struct task_struct *task) | 101 | void user_enable_block_step(struct task_struct *task) |
100 | { | 102 | { |
101 | task->ptrace &= ~PT_SINGLESTEP; | 103 | clear_tsk_thread_flag(task, TIF_SINGLESTEP); |
102 | task->ptrace |= PT_BLOCKSTEP; | 104 | set_tsk_thread_flag(task, TIF_BLOCKSTEP); |
103 | 105 | ||
104 | /* Enable taken branch trap. */ | 106 | /* Enable taken branch trap. */ |
105 | pa_psw(task)->r = 0; | 107 | pa_psw(task)->r = 0; |
@@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
263 | } | 265 | } |
264 | #endif | 266 | #endif |
265 | 267 | ||
268 | long do_syscall_trace_enter(struct pt_regs *regs) | ||
269 | { | ||
270 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | ||
271 | tracehook_report_syscall_entry(regs)) | ||
272 | return -1L; | ||
273 | |||
274 | return regs->gr[20]; | ||
275 | } | ||
266 | 276 | ||
267 | void syscall_trace(void) | 277 | void do_syscall_trace_exit(struct pt_regs *regs) |
268 | { | 278 | { |
269 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 279 | int stepping = test_thread_flag(TIF_SINGLESTEP) || |
270 | return; | 280 | test_thread_flag(TIF_BLOCKSTEP); |
271 | if (!(current->ptrace & PT_PTRACED)) | 281 | |
272 | return; | 282 | if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) |
273 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | 283 | tracehook_report_syscall_exit(regs, stepping); |
274 | ? 0x80 : 0)); | ||
275 | /* | ||
276 | * this isn't the same as continuing with a signal, but it will do | ||
277 | * for normal use. strace only continues with a signal if the | ||
278 | * stopping signal is not SIGTRAP. -brl | ||
279 | */ | ||
280 | if (current->exit_code) { | ||
281 | send_sig(current->exit_code, current, 1); | ||
282 | current->exit_code = 0; | ||
283 | } | ||
284 | } | 284 | } |
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 8eb3c63c407a..fb37ac52e46c 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
@@ -21,11 +21,11 @@ | |||
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | #include <linux/ptrace.h> | 23 | #include <linux/ptrace.h> |
24 | #include <linux/tracehook.h> | ||
24 | #include <linux/unistd.h> | 25 | #include <linux/unistd.h> |
25 | #include <linux/stddef.h> | 26 | #include <linux/stddef.h> |
26 | #include <linux/compat.h> | 27 | #include <linux/compat.h> |
27 | #include <linux/elf.h> | 28 | #include <linux/elf.h> |
28 | #include <linux/tracehook.h> | ||
29 | #include <asm/ucontext.h> | 29 | #include <asm/ucontext.h> |
30 | #include <asm/rt_sigframe.h> | 30 | #include <asm/rt_sigframe.h> |
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -34,7 +34,6 @@ | |||
34 | #include <asm/asm-offsets.h> | 34 | #include <asm/asm-offsets.h> |
35 | 35 | ||
36 | #ifdef CONFIG_COMPAT | 36 | #ifdef CONFIG_COMPAT |
37 | #include <linux/compat.h> | ||
38 | #include "signal32.h" | 37 | #include "signal32.h" |
39 | #endif | 38 | #endif |
40 | 39 | ||
@@ -468,6 +467,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
468 | sigaddset(¤t->blocked,sig); | 467 | sigaddset(¤t->blocked,sig); |
469 | recalc_sigpending(); | 468 | recalc_sigpending(); |
470 | spin_unlock_irq(¤t->sighand->siglock); | 469 | spin_unlock_irq(¤t->sighand->siglock); |
470 | |||
471 | tracehook_signal_handler(sig, info, ka, regs, 0); | ||
472 | |||
471 | return 1; | 473 | return 1; |
472 | } | 474 | } |
473 | 475 | ||
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 1fd0f0cec037..3f2fce8ce6b6 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -60,8 +60,6 @@ static int smp_debug_lvl = 0; | |||
60 | #define smp_debug(lvl, ...) do { } while(0) | 60 | #define smp_debug(lvl, ...) do { } while(0) |
61 | #endif /* DEBUG_SMP */ | 61 | #endif /* DEBUG_SMP */ |
62 | 62 | ||
63 | DEFINE_SPINLOCK(smp_lock); | ||
64 | |||
65 | volatile struct task_struct *smp_init_current_idle_task; | 63 | volatile struct task_struct *smp_init_current_idle_task; |
66 | 64 | ||
67 | /* track which CPU is booting */ | 65 | /* track which CPU is booting */ |
@@ -69,7 +67,7 @@ static volatile int cpu_now_booting __cpuinitdata; | |||
69 | 67 | ||
70 | static int parisc_max_cpus __cpuinitdata = 1; | 68 | static int parisc_max_cpus __cpuinitdata = 1; |
71 | 69 | ||
72 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; | 70 | static DEFINE_PER_CPU(spinlock_t, ipi_lock); |
73 | 71 | ||
74 | enum ipi_message_type { | 72 | enum ipi_message_type { |
75 | IPI_NOP=0, | 73 | IPI_NOP=0, |
@@ -438,6 +436,11 @@ void __init smp_prepare_boot_cpu(void) | |||
438 | */ | 436 | */ |
439 | void __init smp_prepare_cpus(unsigned int max_cpus) | 437 | void __init smp_prepare_cpus(unsigned int max_cpus) |
440 | { | 438 | { |
439 | int cpu; | ||
440 | |||
441 | for_each_possible_cpu(cpu) | ||
442 | spin_lock_init(&per_cpu(ipi_lock, cpu)); | ||
443 | |||
441 | init_cpu_present(cpumask_of(0)); | 444 | init_cpu_present(cpumask_of(0)); |
442 | 445 | ||
443 | parisc_max_cpus = max_cpus; | 446 | parisc_max_cpus = max_cpus; |
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 71b31957c8f1..9147391afb03 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c | |||
@@ -110,37 +110,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
110 | return addr; | 110 | return addr; |
111 | } | 111 | } |
112 | 112 | ||
113 | static unsigned long do_mmap2(unsigned long addr, unsigned long len, | ||
114 | unsigned long prot, unsigned long flags, unsigned long fd, | ||
115 | unsigned long pgoff) | ||
116 | { | ||
117 | struct file * file = NULL; | ||
118 | unsigned long error = -EBADF; | ||
119 | if (!(flags & MAP_ANONYMOUS)) { | ||
120 | file = fget(fd); | ||
121 | if (!file) | ||
122 | goto out; | ||
123 | } | ||
124 | |||
125 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
126 | |||
127 | down_write(¤t->mm->mmap_sem); | ||
128 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
129 | up_write(¤t->mm->mmap_sem); | ||
130 | |||
131 | if (file != NULL) | ||
132 | fput(file); | ||
133 | out: | ||
134 | return error; | ||
135 | } | ||
136 | |||
137 | asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, | 113 | asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, |
138 | unsigned long prot, unsigned long flags, unsigned long fd, | 114 | unsigned long prot, unsigned long flags, unsigned long fd, |
139 | unsigned long pgoff) | 115 | unsigned long pgoff) |
140 | { | 116 | { |
141 | /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE | 117 | /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE |
142 | we have. */ | 118 | we have. */ |
143 | return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); | 119 | return sys_mmap_pgoff(addr, len, prot, flags, fd, |
120 | pgoff >> (PAGE_SHIFT - 12)); | ||
144 | } | 121 | } |
145 | 122 | ||
146 | asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, | 123 | asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, |
@@ -148,7 +125,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, | |||
148 | unsigned long offset) | 125 | unsigned long offset) |
149 | { | 126 | { |
150 | if (!(offset & ~PAGE_MASK)) { | 127 | if (!(offset & ~PAGE_MASK)) { |
151 | return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | 128 | return sys_mmap_pgoff(addr, len, prot, flags, fd, |
129 | offset >> PAGE_SHIFT); | ||
152 | } else { | 130 | } else { |
153 | return -EINVAL; | 131 | return -EINVAL; |
154 | } | 132 | } |
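With do_mmap2() removed, both wrappers above call the generic sys_mmap_pgoff(); what stays arch-specific is the unit conversion, since mmap2 takes its offset in fixed 4 KiB units while sys_mmap_pgoff() wants it in pages, hence the shift by PAGE_SHIFT - 12. A worked example of that conversion with made-up numbers:

```c
#include <stdio.h>

int main(void)
{
	unsigned long pgoff_4k = 0x30;		/* mmap2 offset: 48 * 4 KiB = 192 KiB */
	unsigned int page_shift = 14;		/* e.g. a 16 KiB page size */
	unsigned long pgoff_pages = pgoff_4k >> (page_shift - 12);

	printf("%lu\n", pgoff_pages);		/* 12 pages of 16 KiB = 192 KiB */
	return 0;
}
```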
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 561388b17c91..9779ece2b070 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c | |||
@@ -26,13 +26,7 @@ | |||
26 | #include <linux/shm.h> | 26 | #include <linux/shm.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/uio.h> | 28 | #include <linux/uio.h> |
29 | #include <linux/nfs_fs.h> | ||
30 | #include <linux/ncp_fs.h> | 29 | #include <linux/ncp_fs.h> |
31 | #include <linux/sunrpc/svc.h> | ||
32 | #include <linux/nfsd/nfsd.h> | ||
33 | #include <linux/nfsd/cache.h> | ||
34 | #include <linux/nfsd/xdr.h> | ||
35 | #include <linux/nfsd/syscall.h> | ||
36 | #include <linux/poll.h> | 30 | #include <linux/poll.h> |
37 | #include <linux/personality.h> | 31 | #include <linux/personality.h> |
38 | #include <linux/stat.h> | 32 | #include <linux/stat.h> |
@@ -90,77 +84,6 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, | |||
90 | return -ENOSYS; | 84 | return -ENOSYS; |
91 | } | 85 | } |
92 | 86 | ||
93 | #ifdef CONFIG_SYSCTL | ||
94 | |||
95 | struct __sysctl_args32 { | ||
96 | u32 name; | ||
97 | int nlen; | ||
98 | u32 oldval; | ||
99 | u32 oldlenp; | ||
100 | u32 newval; | ||
101 | u32 newlen; | ||
102 | u32 __unused[4]; | ||
103 | }; | ||
104 | |||
105 | asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) | ||
106 | { | ||
107 | #ifndef CONFIG_SYSCTL_SYSCALL | ||
108 | return -ENOSYS; | ||
109 | #else | ||
110 | struct __sysctl_args32 tmp; | ||
111 | int error; | ||
112 | unsigned int oldlen32; | ||
113 | size_t oldlen, __user *oldlenp = NULL; | ||
114 | unsigned long addr = (((long __force)&args->__unused[0]) + 7) & ~7; | ||
115 | |||
116 | DBG(("sysctl32(%p)\n", args)); | ||
117 | |||
118 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
119 | return -EFAULT; | ||
120 | |||
121 | if (tmp.oldval && tmp.oldlenp) { | ||
122 | /* Duh, this is ugly and might not work if sysctl_args | ||
123 | is in read-only memory, but do_sysctl does indirectly | ||
124 | a lot of uaccess in both directions and we'd have to | ||
125 | basically copy the whole sysctl.c here, and | ||
126 | glibc's __sysctl uses rw memory for the structure | ||
127 | anyway. */ | ||
128 | /* a possibly better hack than this, which will avoid the | ||
129 | * problem if the struct is read only, is to push the | ||
130 | * 'oldlen' value out to the user's stack instead. -PB | ||
131 | */ | ||
132 | if (get_user(oldlen32, (u32 *)(u64)tmp.oldlenp)) | ||
133 | return -EFAULT; | ||
134 | oldlen = oldlen32; | ||
135 | if (put_user(oldlen, (size_t *)addr)) | ||
136 | return -EFAULT; | ||
137 | oldlenp = (size_t *)addr; | ||
138 | } | ||
139 | |||
140 | lock_kernel(); | ||
141 | error = do_sysctl((int __user *)(u64)tmp.name, tmp.nlen, | ||
142 | (void __user *)(u64)tmp.oldval, oldlenp, | ||
143 | (void __user *)(u64)tmp.newval, tmp.newlen); | ||
144 | unlock_kernel(); | ||
145 | if (oldlenp) { | ||
146 | if (!error) { | ||
147 | if (get_user(oldlen, (size_t *)addr)) { | ||
148 | error = -EFAULT; | ||
149 | } else { | ||
150 | oldlen32 = oldlen; | ||
151 | if (put_user(oldlen32, (u32 *)(u64)tmp.oldlenp)) | ||
152 | error = -EFAULT; | ||
153 | } | ||
154 | } | ||
155 | if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused))) | ||
156 | error = -EFAULT; | ||
157 | } | ||
158 | return error; | ||
159 | #endif | ||
160 | } | ||
161 | |||
162 | #endif /* CONFIG_SYSCTL */ | ||
163 | |||
164 | asmlinkage long sys32_sched_rr_get_interval(pid_t pid, | 87 | asmlinkage long sys32_sched_rr_get_interval(pid_t pid, |
165 | struct compat_timespec __user *interval) | 88 | struct compat_timespec __user *interval) |
166 | { | 89 | { |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 59fc1a43ec3e..f5f96021caa0 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
@@ -288,18 +288,23 @@ tracesys: | |||
288 | STREG %r18,PT_GR18(%r2) | 288 | STREG %r18,PT_GR18(%r2) |
289 | /* Finished saving things for the debugger */ | 289 | /* Finished saving things for the debugger */ |
290 | 290 | ||
291 | ldil L%syscall_trace,%r1 | 291 | copy %r2,%r26 |
292 | ldil L%do_syscall_trace_enter,%r1 | ||
292 | ldil L%tracesys_next,%r2 | 293 | ldil L%tracesys_next,%r2 |
293 | be R%syscall_trace(%sr7,%r1) | 294 | be R%do_syscall_trace_enter(%sr7,%r1) |
294 | ldo R%tracesys_next(%r2),%r2 | 295 | ldo R%tracesys_next(%r2),%r2 |
295 | 296 | ||
296 | tracesys_next: | 297 | tracesys_next: |
298 | /* do_syscall_trace_enter either returned the syscallno, or -1L, | ||
299 | * so we skip restoring the PT_GR20 below, since we pulled it from | ||
300 | * task->thread.regs.gr[20] above. | ||
301 | */ | ||
302 | copy %ret0,%r20 | ||
297 | ldil L%sys_call_table,%r1 | 303 | ldil L%sys_call_table,%r1 |
298 | ldo R%sys_call_table(%r1), %r19 | 304 | ldo R%sys_call_table(%r1), %r19 |
299 | 305 | ||
300 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 306 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
301 | LDREG TI_TASK(%r1), %r1 | 307 | LDREG TI_TASK(%r1), %r1 |
302 | LDREG TASK_PT_GR20(%r1), %r20 | ||
303 | LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ | 308 | LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ |
304 | LDREG TASK_PT_GR25(%r1), %r25 | 309 | LDREG TASK_PT_GR25(%r1), %r25 |
305 | LDREG TASK_PT_GR24(%r1), %r24 | 310 | LDREG TASK_PT_GR24(%r1), %r24 |
@@ -336,7 +341,8 @@ tracesys_exit: | |||
336 | #ifdef CONFIG_64BIT | 341 | #ifdef CONFIG_64BIT |
337 | ldo -16(%r30),%r29 /* Reference param save area */ | 342 | ldo -16(%r30),%r29 /* Reference param save area */ |
338 | #endif | 343 | #endif |
339 | bl syscall_trace, %r2 | 344 | ldo TASK_REGS(%r1),%r26 |
345 | bl do_syscall_trace_exit,%r2 | ||
340 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ | 346 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ |
341 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 347 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
342 | LDREG TI_TASK(%r1), %r1 | 348 | LDREG TI_TASK(%r1), %r1 |
@@ -353,12 +359,12 @@ tracesys_exit: | |||
353 | 359 | ||
354 | tracesys_sigexit: | 360 | tracesys_sigexit: |
355 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 361 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
356 | LDREG 0(%r1), %r1 | 362 | LDREG TI_TASK(%r1), %r1 |
357 | #ifdef CONFIG_64BIT | 363 | #ifdef CONFIG_64BIT |
358 | ldo -16(%r30),%r29 /* Reference param save area */ | 364 | ldo -16(%r30),%r29 /* Reference param save area */ |
359 | #endif | 365 | #endif |
360 | bl syscall_trace, %r2 | 366 | bl do_syscall_trace_exit,%r2 |
361 | nop | 367 | ldo TASK_REGS(%r1),%r26 |
362 | 368 | ||
363 | ldil L%syscall_exit_rfi,%r1 | 369 | ldil L%syscall_exit_rfi,%r1 |
364 | be,n R%syscall_exit_rfi(%sr7,%r1) | 370 | be,n R%syscall_exit_rfi(%sr7,%r1) |
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 843f423dec67..01c4fcf8f481 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -234,7 +234,7 @@ | |||
234 | ENTRY_SAME(getsid) | 234 | ENTRY_SAME(getsid) |
235 | ENTRY_SAME(fdatasync) | 235 | ENTRY_SAME(fdatasync) |
236 | /* struct __sysctl_args is a mess */ | 236 | /* struct __sysctl_args is a mess */ |
237 | ENTRY_DIFF(sysctl) | 237 | ENTRY_COMP(sysctl) |
238 | ENTRY_SAME(mlock) /* 150 */ | 238 | ENTRY_SAME(mlock) /* 150 */ |
239 | ENTRY_SAME(munlock) | 239 | ENTRY_SAME(munlock) |
240 | ENTRY_SAME(mlockall) | 240 | ENTRY_SAME(mlockall) |
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 69dad5a850a8..d58eac1a8288 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/kallsyms.h> | 15 | #include <linux/kallsyms.h> |
16 | #include <linux/sort.h> | ||
16 | 17 | ||
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
18 | #include <asm/assembly.h> | 19 | #include <asm/assembly.h> |
@@ -28,7 +29,7 @@ | |||
28 | #define dbg(x...) | 29 | #define dbg(x...) |
29 | #endif | 30 | #endif |
30 | 31 | ||
31 | #define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000) | 32 | #define KERNEL_START (KERNEL_BINARY_TEXT_START) |
32 | 33 | ||
33 | extern struct unwind_table_entry __start___unwind[]; | 34 | extern struct unwind_table_entry __start___unwind[]; |
34 | extern struct unwind_table_entry __stop___unwind[]; | 35 | extern struct unwind_table_entry __stop___unwind[]; |
@@ -115,24 +116,18 @@ unwind_table_init(struct unwind_table *table, const char *name, | |||
115 | } | 116 | } |
116 | } | 117 | } |
117 | 118 | ||
119 | static int cmp_unwind_table_entry(const void *a, const void *b) | ||
120 | { | ||
121 | return ((const struct unwind_table_entry *)a)->region_start | ||
122 | - ((const struct unwind_table_entry *)b)->region_start; | ||
123 | } | ||
124 | |||
118 | static void | 125 | static void |
119 | unwind_table_sort(struct unwind_table_entry *start, | 126 | unwind_table_sort(struct unwind_table_entry *start, |
120 | struct unwind_table_entry *finish) | 127 | struct unwind_table_entry *finish) |
121 | { | 128 | { |
122 | struct unwind_table_entry el, *p, *q; | 129 | sort(start, finish - start, sizeof(struct unwind_table_entry), |
123 | 130 | cmp_unwind_table_entry, NULL); | |
124 | for (p = start + 1; p < finish; ++p) { | ||
125 | if (p[0].region_start < p[-1].region_start) { | ||
126 | el = *p; | ||
127 | q = p; | ||
128 | do { | ||
129 | q[0] = q[-1]; | ||
130 | --q; | ||
131 | } while (q > start && | ||
132 | el.region_start < q[-1].region_start); | ||
133 | *q = el; | ||
134 | } | ||
135 | } | ||
136 | } | 131 | } |
137 | 132 | ||
138 | struct unwind_table * | 133 | struct unwind_table * |
@@ -417,3 +412,30 @@ int unwind_to_user(struct unwind_frame_info *info) | |||
417 | 412 | ||
418 | return ret; | 413 | return ret; |
419 | } | 414 | } |
415 | |||
416 | unsigned long return_address(unsigned int level) | ||
417 | { | ||
418 | struct unwind_frame_info info; | ||
419 | struct pt_regs r; | ||
420 | unsigned long sp; | ||
421 | |||
422 | /* initialize unwind info */ | ||
423 | asm volatile ("copy %%r30, %0" : "=r"(sp)); | ||
424 | memset(&r, 0, sizeof(struct pt_regs)); | ||
425 | r.iaoq[0] = (unsigned long) current_text_addr(); | ||
426 | r.gr[2] = (unsigned long) __builtin_return_address(0); | ||
427 | r.gr[30] = sp; | ||
428 | unwind_frame_init(&info, current, &r); | ||
429 | |||
430 | /* unwind stack */ | ||
431 | ++level; | ||
432 | do { | ||
433 | if (unwind_once(&info) < 0 || info.ip == 0) | ||
434 | return 0; | ||
435 | if (!__kernel_text_address(info.ip)) { | ||
436 | return 0; | ||
437 | } | ||
438 | } while (info.ip && level--); | ||
439 | |||
440 | return info.ip; | ||
441 | } | ||
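Two additions round out unwind.c: the open-coded insertion sort of the unwind table is replaced by the kernel's generic sort() with a comparator on region_start, and return_address() walks the stack with the unwinder to back the CALLER_ADDRn macros added in ftrace.h. The comparator style carries over directly to qsort() in a stand-alone demonstration (entry layout simplified):

```c
#include <stdio.h>
#include <stdlib.h>

struct unwind_entry {
	unsigned int region_start;
	unsigned int region_end;
};

static int cmp_entry(const void *a, const void *b)
{
	return ((const struct unwind_entry *)a)->region_start
	     - ((const struct unwind_entry *)b)->region_start;
}

int main(void)
{
	struct unwind_entry tbl[] = {
		{ 0x3000, 0x3fff }, { 0x1000, 0x1fff }, { 0x2000, 0x2fff },
	};

	qsort(tbl, sizeof(tbl) / sizeof(tbl[0]), sizeof(tbl[0]), cmp_entry);

	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		printf("0x%x\n", tbl[i].region_start);	/* 0x1000 0x2000 0x3000 */
	return 0;
}
```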
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 775be2791bc2..9dab4a4e09f7 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/cache.h> | 28 | #include <asm/cache.h> |
29 | #include <asm/page.h> | 29 | #include <asm/page.h> |
30 | #include <asm/asm-offsets.h> | 30 | #include <asm/asm-offsets.h> |
31 | #include <asm/thread_info.h> | ||
31 | 32 | ||
32 | /* ld script to make hppa Linux kernel */ | 33 | /* ld script to make hppa Linux kernel */ |
33 | #ifndef CONFIG_64BIT | 34 | #ifndef CONFIG_64BIT |
@@ -77,9 +78,6 @@ SECTIONS | |||
77 | */ | 78 | */ |
78 | . = ALIGN(PAGE_SIZE); | 79 | . = ALIGN(PAGE_SIZE); |
79 | data_start = .; | 80 | data_start = .; |
80 | EXCEPTION_TABLE(16) | ||
81 | |||
82 | NOTES | ||
83 | 81 | ||
84 | /* unwind info */ | 82 | /* unwind info */ |
85 | .PARISC.unwind : { | 83 | .PARISC.unwind : { |
@@ -88,6 +86,9 @@ SECTIONS | |||
88 | __stop___unwind = .; | 86 | __stop___unwind = .; |
89 | } | 87 | } |
90 | 88 | ||
89 | EXCEPTION_TABLE(16) | ||
90 | NOTES | ||
91 | |||
91 | /* Data */ | 92 | /* Data */ |
92 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) | 93 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) |
93 | 94 | ||
@@ -134,6 +135,15 @@ SECTIONS | |||
134 | __init_begin = .; | 135 | __init_begin = .; |
135 | INIT_TEXT_SECTION(16384) | 136 | INIT_TEXT_SECTION(16384) |
136 | INIT_DATA_SECTION(16) | 137 | INIT_DATA_SECTION(16) |
138 | /* we have to discard exit text and such at runtime, not link time */ | ||
139 | .exit.text : | ||
140 | { | ||
141 | EXIT_TEXT | ||
142 | } | ||
143 | .exit.data : | ||
144 | { | ||
145 | EXIT_DATA | ||
146 | } | ||
137 | 147 | ||
138 | PERCPU(PAGE_SIZE) | 148 | PERCPU(PAGE_SIZE) |
139 | . = ALIGN(PAGE_SIZE); | 149 | . = ALIGN(PAGE_SIZE); |
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index e3eb739fab19..353963d42059 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c | |||
@@ -12,8 +12,8 @@ | |||
12 | #include <asm/atomic.h> | 12 | #include <asm/atomic.h> |
13 | 13 | ||
14 | #ifdef CONFIG_SMP | 14 | #ifdef CONFIG_SMP |
15 | raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { | 15 | arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { |
16 | [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED | 16 | [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED |
17 | }; | 17 | }; |
18 | #endif | 18 | #endif |
19 | 19 | ||
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index d5aca31fddbb..13b6e3e59b99 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -434,8 +434,8 @@ void mark_rodata_ro(void) | |||
434 | #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ | 434 | #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ |
435 | & ~(VM_MAP_OFFSET-1))) | 435 | & ~(VM_MAP_OFFSET-1))) |
436 | 436 | ||
437 | void *vmalloc_start __read_mostly; | 437 | void *parisc_vmalloc_start __read_mostly; |
438 | EXPORT_SYMBOL(vmalloc_start); | 438 | EXPORT_SYMBOL(parisc_vmalloc_start); |
439 | 439 | ||
440 | #ifdef CONFIG_PA11 | 440 | #ifdef CONFIG_PA11 |
441 | unsigned long pcxl_dma_start __read_mostly; | 441 | unsigned long pcxl_dma_start __read_mostly; |
@@ -496,13 +496,14 @@ void __init mem_init(void) | |||
496 | #ifdef CONFIG_PA11 | 496 | #ifdef CONFIG_PA11 |
497 | if (hppa_dma_ops == &pcxl_dma_ops) { | 497 | if (hppa_dma_ops == &pcxl_dma_ops) { |
498 | pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); | 498 | pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); |
499 | vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); | 499 | parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start |
500 | + PCXL_DMA_MAP_SIZE); | ||
500 | } else { | 501 | } else { |
501 | pcxl_dma_start = 0; | 502 | pcxl_dma_start = 0; |
502 | vmalloc_start = SET_MAP_OFFSET(MAP_START); | 503 | parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); |
503 | } | 504 | } |
504 | #else | 505 | #else |
505 | vmalloc_start = SET_MAP_OFFSET(MAP_START); | 506 | parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); |
506 | #endif | 507 | #endif |
507 | 508 | ||
508 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", | 509 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", |