diff options
| author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
|---|---|---|
| committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
| commit | ada47b5fe13d89735805b566185f4885f5a3f750 (patch) | |
| tree | 644b88f8a71896307d71438e9b3af49126ffb22b /arch/m32r | |
| parent | 43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff) | |
| parent | 3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff) | |
Merge branch 'wip-2.6.34' into old-private-master (tagged: archived-private-master)
Diffstat (limited to 'arch/m32r')
| -rw-r--r-- | arch/m32r/include/asm/cacheflush.h | 3 | ||||
| -rw-r--r-- | arch/m32r/include/asm/elf.h | 1 | ||||
| -rw-r--r-- | arch/m32r/include/asm/local.h | 25 | ||||
| -rw-r--r-- | arch/m32r/include/asm/ptrace.h | 2 | ||||
| -rw-r--r-- | arch/m32r/include/asm/socket.h | 2 | ||||
| -rw-r--r-- | arch/m32r/include/asm/spinlock.h | 48 | ||||
| -rw-r--r-- | arch/m32r/include/asm/spinlock_types.h | 8 | ||||
| -rw-r--r-- | arch/m32r/include/asm/tlbflush.h | 2 | ||||
| -rw-r--r-- | arch/m32r/include/asm/unistd.h | 1 | ||||
| -rw-r--r-- | arch/m32r/kernel/irq.c | 4 | ||||
| -rw-r--r-- | arch/m32r/kernel/process.c | 2 | ||||
| -rw-r--r-- | arch/m32r/kernel/ptrace.c | 97 | ||||
| -rw-r--r-- | arch/m32r/kernel/sys_m32r.c | 116 | ||||
| -rw-r--r-- | arch/m32r/kernel/syscall_table.S | 2 | ||||
| -rw-r--r-- | arch/m32r/mm/fault-nommu.c | 2 | ||||
| -rw-r--r-- | arch/m32r/mm/fault.c | 6 | ||||
| -rw-r--r-- | arch/m32r/mm/init.c | 1 |
17 files changed, 75 insertions, 247 deletions
diff --git a/arch/m32r/include/asm/cacheflush.h b/arch/m32r/include/asm/cacheflush.h index 78587c958146..8e8e04516c39 100644 --- a/arch/m32r/include/asm/cacheflush.h +++ b/arch/m32r/include/asm/cacheflush.h | |||
| @@ -12,6 +12,7 @@ extern void _flush_cache_copyback_all(void); | |||
| 12 | #define flush_cache_dup_mm(mm) do { } while (0) | 12 | #define flush_cache_dup_mm(mm) do { } while (0) |
| 13 | #define flush_cache_range(vma, start, end) do { } while (0) | 13 | #define flush_cache_range(vma, start, end) do { } while (0) |
| 14 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | 14 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
| 15 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
| 15 | #define flush_dcache_page(page) do { } while (0) | 16 | #define flush_dcache_page(page) do { } while (0) |
| 16 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 17 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
| 17 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 18 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
| @@ -33,6 +34,7 @@ extern void smp_flush_cache_all(void); | |||
| 33 | #define flush_cache_dup_mm(mm) do { } while (0) | 34 | #define flush_cache_dup_mm(mm) do { } while (0) |
| 34 | #define flush_cache_range(vma, start, end) do { } while (0) | 35 | #define flush_cache_range(vma, start, end) do { } while (0) |
| 35 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | 36 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
| 37 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
| 36 | #define flush_dcache_page(page) do { } while (0) | 38 | #define flush_dcache_page(page) do { } while (0) |
| 37 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 39 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
| 38 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 40 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
| @@ -46,6 +48,7 @@ extern void smp_flush_cache_all(void); | |||
| 46 | #define flush_cache_dup_mm(mm) do { } while (0) | 48 | #define flush_cache_dup_mm(mm) do { } while (0) |
| 47 | #define flush_cache_range(vma, start, end) do { } while (0) | 49 | #define flush_cache_range(vma, start, end) do { } while (0) |
| 48 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | 50 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
| 51 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
| 49 | #define flush_dcache_page(page) do { } while (0) | 52 | #define flush_dcache_page(page) do { } while (0) |
| 50 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 53 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
| 51 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 54 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h index 0cc34c94bf2b..2f85412ef730 100644 --- a/arch/m32r/include/asm/elf.h +++ b/arch/m32r/include/asm/elf.h | |||
| @@ -102,7 +102,6 @@ typedef elf_fpreg_t elf_fpregset_t; | |||
| 102 | */ | 102 | */ |
| 103 | #define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0 | 103 | #define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0 |
| 104 | 104 | ||
| 105 | #define USE_ELF_CORE_DUMP | ||
| 106 | #define ELF_EXEC_PAGESIZE PAGE_SIZE | 105 | #define ELF_EXEC_PAGESIZE PAGE_SIZE |
| 107 | 106 | ||
| 108 | /* | 107 | /* |
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h index 22256d138630..734bca87018a 100644 --- a/arch/m32r/include/asm/local.h +++ b/arch/m32r/include/asm/local.h | |||
| @@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr) | |||
| 338 | * a variable, not an address. | 338 | * a variable, not an address. |
| 339 | */ | 339 | */ |
| 340 | 340 | ||
| 341 | /* Need to disable preemption for the cpu local counters otherwise we could | ||
| 342 | still access a variable of a previous CPU in a non local way. */ | ||
| 343 | #define cpu_local_wrap_v(l) \ | ||
| 344 | ({ local_t res__; \ | ||
| 345 | preempt_disable(); \ | ||
| 346 | res__ = (l); \ | ||
| 347 | preempt_enable(); \ | ||
| 348 | res__; }) | ||
| 349 | #define cpu_local_wrap(l) \ | ||
| 350 | ({ preempt_disable(); \ | ||
| 351 | l; \ | ||
| 352 | preempt_enable(); }) \ | ||
| 353 | |||
| 354 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) | ||
| 355 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) | ||
| 356 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) | ||
| 357 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) | ||
| 358 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) | ||
| 359 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) | ||
| 360 | |||
| 361 | #define __cpu_local_inc(l) cpu_local_inc(l) | ||
| 362 | #define __cpu_local_dec(l) cpu_local_dec(l) | ||
| 363 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | ||
| 364 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | ||
| 365 | |||
| 366 | #endif /* __M32R_LOCAL_H */ | 341 | #endif /* __M32R_LOCAL_H */ |
diff --git a/arch/m32r/include/asm/ptrace.h b/arch/m32r/include/asm/ptrace.h index a0755b982028..840a1231edeb 100644 --- a/arch/m32r/include/asm/ptrace.h +++ b/arch/m32r/include/asm/ptrace.h | |||
| @@ -120,6 +120,8 @@ struct pt_regs { | |||
| 120 | 120 | ||
| 121 | #include <asm/m32r.h> /* M32R_PSW_BSM, M32R_PSW_BPM */ | 121 | #include <asm/m32r.h> /* M32R_PSW_BSM, M32R_PSW_BPM */ |
| 122 | 122 | ||
| 123 | #define arch_has_single_step() (1) | ||
| 124 | |||
| 123 | struct task_struct; | 125 | struct task_struct; |
| 124 | extern void init_debug_traps(struct task_struct *); | 126 | extern void init_debug_traps(struct task_struct *); |
| 125 | #define arch_ptrace_attach(child) \ | 127 | #define arch_ptrace_attach(child) \ |
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h index 3390a864f224..469787c30098 100644 --- a/arch/m32r/include/asm/socket.h +++ b/arch/m32r/include/asm/socket.h | |||
| @@ -60,4 +60,6 @@ | |||
| 60 | #define SO_PROTOCOL 38 | 60 | #define SO_PROTOCOL 38 |
| 61 | #define SO_DOMAIN 39 | 61 | #define SO_DOMAIN 39 |
| 62 | 62 | ||
| 63 | #define SO_RXQ_OVFL 40 | ||
| 64 | |||
| 63 | #endif /* _ASM_M32R_SOCKET_H */ | 65 | #endif /* _ASM_M32R_SOCKET_H */ |
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index dded923883b2..179a06489b10 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h | |||
| @@ -24,19 +24,19 @@ | |||
| 24 | * We make no fairness assumptions. They have a cost. | 24 | * We make no fairness assumptions. They have a cost. |
| 25 | */ | 25 | */ |
| 26 | 26 | ||
| 27 | #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) | 27 | #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) |
| 28 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 28 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
| 29 | #define __raw_spin_unlock_wait(x) \ | 29 | #define arch_spin_unlock_wait(x) \ |
| 30 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | 30 | do { cpu_relax(); } while (arch_spin_is_locked(x)) |
| 31 | 31 | ||
| 32 | /** | 32 | /** |
| 33 | * __raw_spin_trylock - Try spin lock and return a result | 33 | * arch_spin_trylock - Try spin lock and return a result |
| 34 | * @lock: Pointer to the lock variable | 34 | * @lock: Pointer to the lock variable |
| 35 | * | 35 | * |
| 36 | * __raw_spin_trylock() tries to get the lock and returns a result. | 36 | * arch_spin_trylock() tries to get the lock and returns a result. |
| 37 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). | 37 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). |
| 38 | */ | 38 | */ |
| 39 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 39 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
| 40 | { | 40 | { |
| 41 | int oldval; | 41 | int oldval; |
| 42 | unsigned long tmp1, tmp2; | 42 | unsigned long tmp1, tmp2; |
| @@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
| 50 | * } | 50 | * } |
| 51 | */ | 51 | */ |
| 52 | __asm__ __volatile__ ( | 52 | __asm__ __volatile__ ( |
| 53 | "# __raw_spin_trylock \n\t" | 53 | "# arch_spin_trylock \n\t" |
| 54 | "ldi %1, #0; \n\t" | 54 | "ldi %1, #0; \n\t" |
| 55 | "mvfc %2, psw; \n\t" | 55 | "mvfc %2, psw; \n\t" |
| 56 | "clrpsw #0x40 -> nop; \n\t" | 56 | "clrpsw #0x40 -> nop; \n\t" |
| @@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
| 69 | return (oldval > 0); | 69 | return (oldval > 0); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 72 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
| 73 | { | 73 | { |
| 74 | unsigned long tmp0, tmp1; | 74 | unsigned long tmp0, tmp1; |
| 75 | 75 | ||
| @@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
| 84 | * } | 84 | * } |
| 85 | */ | 85 | */ |
| 86 | __asm__ __volatile__ ( | 86 | __asm__ __volatile__ ( |
| 87 | "# __raw_spin_lock \n\t" | 87 | "# arch_spin_lock \n\t" |
| 88 | ".fillinsn \n" | 88 | ".fillinsn \n" |
| 89 | "1: \n\t" | 89 | "1: \n\t" |
| 90 | "mvfc %1, psw; \n\t" | 90 | "mvfc %1, psw; \n\t" |
| @@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
| 111 | ); | 111 | ); |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 114 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
| 115 | { | 115 | { |
| 116 | mb(); | 116 | mb(); |
| 117 | lock->slock = 1; | 117 | lock->slock = 1; |
| @@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
| 140 | * read_can_lock - would read_trylock() succeed? | 140 | * read_can_lock - would read_trylock() succeed? |
| 141 | * @lock: the rwlock in question. | 141 | * @lock: the rwlock in question. |
| 142 | */ | 142 | */ |
| 143 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) | 143 | #define arch_read_can_lock(x) ((int)(x)->lock > 0) |
| 144 | 144 | ||
| 145 | /** | 145 | /** |
| 146 | * write_can_lock - would write_trylock() succeed? | 146 | * write_can_lock - would write_trylock() succeed? |
| 147 | * @lock: the rwlock in question. | 147 | * @lock: the rwlock in question. |
| 148 | */ | 148 | */ |
| 149 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 149 | #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
| 150 | 150 | ||
| 151 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 151 | static inline void arch_read_lock(arch_rwlock_t *rw) |
| 152 | { | 152 | { |
| 153 | unsigned long tmp0, tmp1; | 153 | unsigned long tmp0, tmp1; |
| 154 | 154 | ||
| @@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
| 199 | ); | 199 | ); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 202 | static inline void arch_write_lock(arch_rwlock_t *rw) |
| 203 | { | 203 | { |
| 204 | unsigned long tmp0, tmp1, tmp2; | 204 | unsigned long tmp0, tmp1, tmp2; |
| 205 | 205 | ||
| @@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
| 252 | ); | 252 | ); |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 255 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
| 256 | { | 256 | { |
| 257 | unsigned long tmp0, tmp1; | 257 | unsigned long tmp0, tmp1; |
| 258 | 258 | ||
| @@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
| 274 | ); | 274 | ); |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 277 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
| 278 | { | 278 | { |
| 279 | unsigned long tmp0, tmp1, tmp2; | 279 | unsigned long tmp0, tmp1, tmp2; |
| 280 | 280 | ||
| @@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
| 298 | ); | 298 | ); |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | 301 | static inline int arch_read_trylock(arch_rwlock_t *lock) |
| 302 | { | 302 | { |
| 303 | atomic_t *count = (atomic_t*)lock; | 303 | atomic_t *count = (atomic_t*)lock; |
| 304 | if (atomic_dec_return(count) >= 0) | 304 | if (atomic_dec_return(count) >= 0) |
| @@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) | |||
| 307 | return 0; | 307 | return 0; |
| 308 | } | 308 | } |
| 309 | 309 | ||
| 310 | static inline int __raw_write_trylock(raw_rwlock_t *lock) | 310 | static inline int arch_write_trylock(arch_rwlock_t *lock) |
| 311 | { | 311 | { |
| 312 | atomic_t *count = (atomic_t *)lock; | 312 | atomic_t *count = (atomic_t *)lock; |
| 313 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 313 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
| @@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) | |||
| 316 | return 0; | 316 | return 0; |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 319 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
| 320 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 320 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
| 321 | 321 | ||
| 322 | #define _raw_spin_relax(lock) cpu_relax() | 322 | #define arch_spin_relax(lock) cpu_relax() |
| 323 | #define _raw_read_relax(lock) cpu_relax() | 323 | #define arch_read_relax(lock) cpu_relax() |
| 324 | #define _raw_write_relax(lock) cpu_relax() | 324 | #define arch_write_relax(lock) cpu_relax() |
| 325 | 325 | ||
| 326 | #endif /* _ASM_M32R_SPINLOCK_H */ | 326 | #endif /* _ASM_M32R_SPINLOCK_H */ |
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 83f52105c0e4..92e27672661f 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h | |||
| @@ -7,17 +7,17 @@ | |||
| 7 | 7 | ||
| 8 | typedef struct { | 8 | typedef struct { |
| 9 | volatile int slock; | 9 | volatile int slock; |
| 10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
| 11 | 11 | ||
| 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 1 } |
| 13 | 13 | ||
| 14 | typedef struct { | 14 | typedef struct { |
| 15 | volatile int lock; | 15 | volatile int lock; |
| 16 | } raw_rwlock_t; | 16 | } arch_rwlock_t; |
| 17 | 17 | ||
| 18 | #define RW_LOCK_BIAS 0x01000000 | 18 | #define RW_LOCK_BIAS 0x01000000 |
| 19 | #define RW_LOCK_BIAS_STR "0x01000000" | 19 | #define RW_LOCK_BIAS_STR "0x01000000" |
| 20 | 20 | ||
| 21 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | 21 | #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
| 22 | 22 | ||
| 23 | #endif /* _ASM_M32R_SPINLOCK_TYPES_H */ | 23 | #endif /* _ASM_M32R_SPINLOCK_TYPES_H */ |
diff --git a/arch/m32r/include/asm/tlbflush.h b/arch/m32r/include/asm/tlbflush.h index 0ef95307784e..92614b0ccf17 100644 --- a/arch/m32r/include/asm/tlbflush.h +++ b/arch/m32r/include/asm/tlbflush.h | |||
| @@ -92,6 +92,6 @@ static __inline__ void __flush_tlb_all(void) | |||
| 92 | ); | 92 | ); |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | 95 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); |
| 96 | 96 | ||
| 97 | #endif /* _ASM_M32R_TLBFLUSH_H */ | 97 | #endif /* _ASM_M32R_TLBFLUSH_H */ |
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h index cf701c933249..76125777483c 100644 --- a/arch/m32r/include/asm/unistd.h +++ b/arch/m32r/include/asm/unistd.h | |||
| @@ -339,6 +339,7 @@ | |||
| 339 | #define __ARCH_WANT_STAT64 | 339 | #define __ARCH_WANT_STAT64 |
| 340 | #define __ARCH_WANT_SYS_ALARM | 340 | #define __ARCH_WANT_SYS_ALARM |
| 341 | #define __ARCH_WANT_SYS_GETHOSTNAME | 341 | #define __ARCH_WANT_SYS_GETHOSTNAME |
| 342 | #define __ARCH_WANT_SYS_IPC | ||
| 342 | #define __ARCH_WANT_SYS_PAUSE | 343 | #define __ARCH_WANT_SYS_PAUSE |
| 343 | #define __ARCH_WANT_SYS_TIME | 344 | #define __ARCH_WANT_SYS_TIME |
| 344 | #define __ARCH_WANT_SYS_UTIME | 345 | #define __ARCH_WANT_SYS_UTIME |
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 8dfd31e87c4c..3c71f776872c 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c | |||
| @@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | if (i < NR_IRQS) { | 42 | if (i < NR_IRQS) { |
| 43 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 43 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
| 44 | action = irq_desc[i].action; | 44 | action = irq_desc[i].action; |
| 45 | if (!action) | 45 | if (!action) |
| 46 | goto skip; | 46 | goto skip; |
| @@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 59 | 59 | ||
| 60 | seq_putc(p, '\n'); | 60 | seq_putc(p, '\n'); |
| 61 | skip: | 61 | skip: |
| 62 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 62 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
| 63 | } | 63 | } |
| 64 | return 0; | 64 | return 0; |
| 65 | } | 65 | } |
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index 67a01e1e4283..bc8c8c1511b2 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c | |||
| @@ -21,10 +21,10 @@ | |||
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
| 24 | #include <linux/slab.h> | ||
| 24 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 25 | #include <linux/ptrace.h> | 26 | #include <linux/ptrace.h> |
| 26 | #include <linux/unistd.h> | 27 | #include <linux/unistd.h> |
| 27 | #include <linux/slab.h> | ||
| 28 | #include <linux/hardirq.h> | 28 | #include <linux/hardirq.h> |
| 29 | 29 | ||
| 30 | #include <asm/io.h> | 30 | #include <asm/io.h> |
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index 98682bba0ed9..e555091eb97c 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c | |||
| @@ -580,6 +580,35 @@ init_debug_traps(struct task_struct *child) | |||
| 580 | } | 580 | } |
| 581 | } | 581 | } |
| 582 | 582 | ||
| 583 | void user_enable_single_step(struct task_struct *child) | ||
| 584 | { | ||
| 585 | unsigned long next_pc; | ||
| 586 | unsigned long pc, insn; | ||
| 587 | |||
| 588 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
| 589 | |||
| 590 | /* Compute next pc. */ | ||
| 591 | pc = get_stack_long(child, PT_BPC); | ||
| 592 | |||
| 593 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) | ||
| 594 | != sizeof(insn)) | ||
| 595 | break; | ||
| 596 | |||
| 597 | compute_next_pc(insn, pc, &next_pc, child); | ||
| 598 | if (next_pc & 0x80000000) | ||
| 599 | break; | ||
| 600 | |||
| 601 | if (embed_debug_trap(child, next_pc)) | ||
| 602 | break; | ||
| 603 | |||
| 604 | invalidate_cache(); | ||
| 605 | } | ||
| 606 | |||
| 607 | void user_disable_single_step(struct task_struct *child) | ||
| 608 | { | ||
| 609 | unregister_all_debug_traps(child); | ||
| 610 | invalidate_cache(); | ||
| 611 | } | ||
| 583 | 612 | ||
| 584 | /* | 613 | /* |
| 585 | * Called by kernel/ptrace.c when detaching.. | 614 | * Called by kernel/ptrace.c when detaching.. |
| @@ -630,74 +659,6 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 630 | ret = ptrace_write_user(child, addr, data); | 659 | ret = ptrace_write_user(child, addr, data); |
| 631 | break; | 660 | break; |
| 632 | 661 | ||
| 633 | /* | ||
| 634 | * continue/restart and stop at next (return from) syscall | ||
| 635 | */ | ||
| 636 | case PTRACE_SYSCALL: | ||
| 637 | case PTRACE_CONT: | ||
| 638 | ret = -EIO; | ||
| 639 | if (!valid_signal(data)) | ||
| 640 | break; | ||
| 641 | if (request == PTRACE_SYSCALL) | ||
| 642 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
| 643 | else | ||
| 644 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
| 645 | child->exit_code = data; | ||
| 646 | wake_up_process(child); | ||
| 647 | ret = 0; | ||
| 648 | break; | ||
| 649 | |||
| 650 | /* | ||
| 651 | * make the child exit. Best I can do is send it a sigkill. | ||
| 652 | * perhaps it should be put in the status that it wants to | ||
| 653 | * exit. | ||
| 654 | */ | ||
| 655 | case PTRACE_KILL: { | ||
| 656 | ret = 0; | ||
| 657 | unregister_all_debug_traps(child); | ||
| 658 | invalidate_cache(); | ||
| 659 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ | ||
| 660 | break; | ||
| 661 | child->exit_code = SIGKILL; | ||
| 662 | wake_up_process(child); | ||
| 663 | break; | ||
| 664 | } | ||
| 665 | |||
| 666 | /* | ||
| 667 | * execute single instruction. | ||
| 668 | */ | ||
| 669 | case PTRACE_SINGLESTEP: { | ||
| 670 | unsigned long next_pc; | ||
| 671 | unsigned long pc, insn; | ||
| 672 | |||
| 673 | ret = -EIO; | ||
| 674 | if (!valid_signal(data)) | ||
| 675 | break; | ||
| 676 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
| 677 | |||
| 678 | /* Compute next pc. */ | ||
| 679 | pc = get_stack_long(child, PT_BPC); | ||
| 680 | |||
| 681 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) | ||
| 682 | != sizeof(insn)) | ||
| 683 | break; | ||
| 684 | |||
| 685 | compute_next_pc(insn, pc, &next_pc, child); | ||
| 686 | if (next_pc & 0x80000000) | ||
| 687 | break; | ||
| 688 | |||
| 689 | if (embed_debug_trap(child, next_pc)) | ||
| 690 | break; | ||
| 691 | |||
| 692 | invalidate_cache(); | ||
| 693 | child->exit_code = data; | ||
| 694 | |||
| 695 | /* give it a chance to run. */ | ||
| 696 | wake_up_process(child); | ||
| 697 | ret = 0; | ||
| 698 | break; | ||
| 699 | } | ||
| 700 | |||
| 701 | case PTRACE_GETREGS: | 662 | case PTRACE_GETREGS: |
| 702 | ret = ptrace_getregs(child, (void __user *)data); | 663 | ret = ptrace_getregs(child, (void __user *)data); |
| 703 | break; | 664 | break; |
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c index 305ac852bbed..0a00f467edfa 100644 --- a/arch/m32r/kernel/sys_m32r.c +++ b/arch/m32r/kernel/sys_m32r.c | |||
| @@ -76,122 +76,6 @@ asmlinkage int sys_tas(int __user *addr) | |||
| 76 | return oldval; | 76 | return oldval; |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
| 80 | unsigned long prot, unsigned long flags, | ||
| 81 | unsigned long fd, unsigned long pgoff) | ||
| 82 | { | ||
| 83 | int error = -EBADF; | ||
| 84 | struct file *file = NULL; | ||
| 85 | |||
| 86 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
| 87 | if (!(flags & MAP_ANONYMOUS)) { | ||
| 88 | file = fget(fd); | ||
| 89 | if (!file) | ||
| 90 | goto out; | ||
| 91 | } | ||
| 92 | |||
| 93 | down_write(¤t->mm->mmap_sem); | ||
| 94 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
| 95 | up_write(¤t->mm->mmap_sem); | ||
| 96 | |||
| 97 | if (file) | ||
| 98 | fput(file); | ||
| 99 | out: | ||
| 100 | return error; | ||
| 101 | } | ||
| 102 | |||
| 103 | /* | ||
| 104 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | ||
| 105 | * | ||
| 106 | * This is really horribly ugly. | ||
| 107 | */ | ||
| 108 | asmlinkage int sys_ipc(uint call, int first, int second, | ||
| 109 | int third, void __user *ptr, long fifth) | ||
| 110 | { | ||
| 111 | int version, ret; | ||
| 112 | |||
| 113 | version = call >> 16; /* hack for backward compatibility */ | ||
| 114 | call &= 0xffff; | ||
| 115 | |||
| 116 | switch (call) { | ||
| 117 | case SEMOP: | ||
| 118 | return sys_semtimedop(first, (struct sembuf __user *)ptr, | ||
| 119 | second, NULL); | ||
| 120 | case SEMTIMEDOP: | ||
| 121 | return sys_semtimedop(first, (struct sembuf __user *)ptr, | ||
| 122 | second, (const struct timespec __user *)fifth); | ||
| 123 | case SEMGET: | ||
| 124 | return sys_semget (first, second, third); | ||
| 125 | case SEMCTL: { | ||
| 126 | union semun fourth; | ||
| 127 | if (!ptr) | ||
| 128 | return -EINVAL; | ||
| 129 | if (get_user(fourth.__pad, (void __user * __user *) ptr)) | ||
| 130 | return -EFAULT; | ||
| 131 | return sys_semctl (first, second, third, fourth); | ||
| 132 | } | ||
| 133 | |||
| 134 | case MSGSND: | ||
| 135 | return sys_msgsnd (first, (struct msgbuf __user *) ptr, | ||
| 136 | second, third); | ||
| 137 | case MSGRCV: | ||
| 138 | switch (version) { | ||
| 139 | case 0: { | ||
| 140 | struct ipc_kludge tmp; | ||
| 141 | if (!ptr) | ||
| 142 | return -EINVAL; | ||
| 143 | |||
| 144 | if (copy_from_user(&tmp, | ||
| 145 | (struct ipc_kludge __user *) ptr, | ||
| 146 | sizeof (tmp))) | ||
| 147 | return -EFAULT; | ||
| 148 | return sys_msgrcv (first, tmp.msgp, second, | ||
| 149 | tmp.msgtyp, third); | ||
| 150 | } | ||
| 151 | default: | ||
| 152 | return sys_msgrcv (first, | ||
| 153 | (struct msgbuf __user *) ptr, | ||
| 154 | second, fifth, third); | ||
| 155 | } | ||
| 156 | case MSGGET: | ||
| 157 | return sys_msgget ((key_t) first, second); | ||
| 158 | case MSGCTL: | ||
| 159 | return sys_msgctl (first, second, | ||
| 160 | (struct msqid_ds __user *) ptr); | ||
| 161 | case SHMAT: { | ||
| 162 | ulong raddr; | ||
| 163 | |||
| 164 | if (!access_ok(VERIFY_WRITE, (ulong __user *) third, | ||
| 165 | sizeof(ulong))) | ||
| 166 | return -EFAULT; | ||
| 167 | ret = do_shmat (first, (char __user *) ptr, second, &raddr); | ||
| 168 | if (ret) | ||
| 169 | return ret; | ||
| 170 | return put_user (raddr, (ulong __user *) third); | ||
| 171 | } | ||
| 172 | case SHMDT: | ||
| 173 | return sys_shmdt ((char __user *)ptr); | ||
| 174 | case SHMGET: | ||
| 175 | return sys_shmget (first, second, third); | ||
| 176 | case SHMCTL: | ||
| 177 | return sys_shmctl (first, second, | ||
| 178 | (struct shmid_ds __user *) ptr); | ||
| 179 | default: | ||
| 180 | return -ENOSYS; | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | asmlinkage int sys_uname(struct old_utsname __user * name) | ||
| 185 | { | ||
| 186 | int err; | ||
| 187 | if (!name) | ||
| 188 | return -EFAULT; | ||
| 189 | down_read(&uts_sem); | ||
| 190 | err = copy_to_user(name, utsname(), sizeof (*name)); | ||
| 191 | up_read(&uts_sem); | ||
| 192 | return err?-EFAULT:0; | ||
| 193 | } | ||
| 194 | |||
| 195 | asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) | 79 | asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) |
| 196 | { | 80 | { |
| 197 | /* This should flush more selectively ... */ | 81 | /* This should flush more selectively ... */ |
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S index aa3bf4cfab37..60536e271233 100644 --- a/arch/m32r/kernel/syscall_table.S +++ b/arch/m32r/kernel/syscall_table.S | |||
| @@ -191,7 +191,7 @@ ENTRY(sys_call_table) | |||
| 191 | .long sys_ni_syscall /* streams2 */ | 191 | .long sys_ni_syscall /* streams2 */ |
| 192 | .long sys_vfork /* 190 */ | 192 | .long sys_vfork /* 190 */ |
| 193 | .long sys_getrlimit | 193 | .long sys_getrlimit |
| 194 | .long sys_mmap2 | 194 | .long sys_mmap_pgoff |
| 195 | .long sys_truncate64 | 195 | .long sys_truncate64 |
| 196 | .long sys_ftruncate64 | 196 | .long sys_ftruncate64 |
| 197 | .long sys_stat64 /* 195 */ | 197 | .long sys_stat64 /* 195 */ |
diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c index 88469178ea6b..888aab1157ed 100644 --- a/arch/m32r/mm/fault-nommu.c +++ b/arch/m32r/mm/fault-nommu.c | |||
| @@ -95,7 +95,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, | |||
| 95 | * update_mmu_cache() | 95 | * update_mmu_cache() |
| 96 | *======================================================================*/ | 96 | *======================================================================*/ |
| 97 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, | 97 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, |
| 98 | pte_t pte) | 98 | pte_t *ptep) |
| 99 | { | 99 | { |
| 100 | BUG(); | 100 | BUG(); |
| 101 | } | 101 | } |
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index 7274b47f4c22..28ee389e5f5a 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c | |||
| @@ -336,7 +336,7 @@ vmalloc_fault: | |||
| 336 | 336 | ||
| 337 | addr = (address & PAGE_MASK); | 337 | addr = (address & PAGE_MASK); |
| 338 | set_thread_fault_code(error_code); | 338 | set_thread_fault_code(error_code); |
| 339 | update_mmu_cache(NULL, addr, *pte_k); | 339 | update_mmu_cache(NULL, addr, pte_k); |
| 340 | set_thread_fault_code(0); | 340 | set_thread_fault_code(0); |
| 341 | return; | 341 | return; |
| 342 | } | 342 | } |
| @@ -349,7 +349,7 @@ vmalloc_fault: | |||
| 349 | #define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8)) | 349 | #define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8)) |
| 350 | #define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8)) | 350 | #define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8)) |
| 351 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr, | 351 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr, |
| 352 | pte_t pte) | 352 | pte_t *ptep) |
| 353 | { | 353 | { |
| 354 | volatile unsigned long *entry1, *entry2; | 354 | volatile unsigned long *entry1, *entry2; |
| 355 | unsigned long pte_data, flags; | 355 | unsigned long pte_data, flags; |
| @@ -365,7 +365,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr, | |||
| 365 | 365 | ||
| 366 | vaddr = (vaddr & PAGE_MASK) | get_asid(); | 366 | vaddr = (vaddr & PAGE_MASK) | get_asid(); |
| 367 | 367 | ||
| 368 | pte_data = pte_val(pte); | 368 | pte_data = pte_val(*ptep); |
| 369 | 369 | ||
| 370 | #ifdef CONFIG_CHIP_OPSP | 370 | #ifdef CONFIG_CHIP_OPSP |
| 371 | entry1 = (unsigned long *)ITLB_BASE; | 371 | entry1 = (unsigned long *)ITLB_BASE; |
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c index 9f581df3952b..73e2205ebf5a 100644 --- a/arch/m32r/mm/init.c +++ b/arch/m32r/mm/init.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
| 20 | #include <linux/nodemask.h> | 20 | #include <linux/nodemask.h> |
| 21 | #include <linux/pfn.h> | 21 | #include <linux/pfn.h> |
| 22 | #include <linux/gfp.h> | ||
| 22 | #include <asm/types.h> | 23 | #include <asm/types.h> |
| 23 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
| 24 | #include <asm/page.h> | 25 | #include <asm/page.h> |
