author    Linus Torvalds <torvalds@linux-foundation.org>  2010-10-27 21:53:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-27 21:53:26 -0400
commit    bdab225015fbbb45ccd8913f5d7c01b2bf67d8b2 (patch)
tree      5ef62301face958977a084bf2b6c5300296a25f2 /arch/mn10300/include/asm
parent    7c5814c7199851c5fe9395d08fc1ab3c8c1531ea (diff)
parent    7c7fcf762e405eb040ee10d22d656a791f616122 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300: (44 commits)
  MN10300: Save frame pointer in thread_info struct rather than global var
  MN10300: Change "Matsushita" to "Panasonic".
  MN10300: Create a defconfig for the ASB2364 board
  MN10300: Update the ASB2303 defconfig
  MN10300: ASB2364: Add support for SMSC911X and SMC911X
  MN10300: ASB2364: Handle the IRQ multiplexer in the FPGA
  MN10300: Generic time support
  MN10300: Specify an ELF HWCAP flag for MN10300 Atomic Operations Unit support
  MN10300: Map userspace atomic op regs as a vmalloc page
  MN10300: And Panasonic AM34 subarch and implement SMP
  MN10300: Delete idle_timestamp from irq_cpustat_t
  MN10300: Make various interrupt priority settings configurable
  MN10300: Optimise do_csum()
  MN10300: Implement atomic ops using atomic ops unit
  MN10300: Make the FPU operate in non-lazy mode under SMP
  MN10300: SMP TLB flushing
  MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control
  MN10300: Make the use of PIDR to mark TLB entries controllable
  MN10300: Rename __flush_tlb*() to local_flush_tlb*()
  MN10300: AM34 erratum requires MMUCTR read and write on exception entry
  ...
Diffstat (limited to 'arch/mn10300/include/asm')
-rw-r--r--  arch/mn10300/include/asm/atomic.h | 350
-rw-r--r--  arch/mn10300/include/asm/bitops.h | 14
-rw-r--r--  arch/mn10300/include/asm/cache.h | 14
-rw-r--r--  arch/mn10300/include/asm/cacheflush.h | 164
-rw-r--r--  arch/mn10300/include/asm/cpu-regs.h | 91
-rw-r--r--  arch/mn10300/include/asm/dmactl-regs.h | 87
-rw-r--r--  arch/mn10300/include/asm/elf.h | 12
-rw-r--r--  arch/mn10300/include/asm/exceptions.h | 8
-rw-r--r--  arch/mn10300/include/asm/fpu.h | 157
-rw-r--r--  arch/mn10300/include/asm/frame.inc | 20
-rw-r--r--  arch/mn10300/include/asm/gdb-stub.h | 2
-rw-r--r--  arch/mn10300/include/asm/hardirq.h | 3
-rw-r--r--  arch/mn10300/include/asm/highmem.h | 4
-rw-r--r--  arch/mn10300/include/asm/intctl-regs.h | 37
-rw-r--r--  arch/mn10300/include/asm/io.h | 13
-rw-r--r--  arch/mn10300/include/asm/irq.h | 12
-rw-r--r--  arch/mn10300/include/asm/irq_regs.h | 6
-rw-r--r--  arch/mn10300/include/asm/irqflags.h | 111
-rw-r--r--  arch/mn10300/include/asm/mmu_context.h | 73
-rw-r--r--  arch/mn10300/include/asm/pgalloc.h | 1
-rw-r--r--  arch/mn10300/include/asm/pgtable.h | 88
-rw-r--r--  arch/mn10300/include/asm/processor.h | 66
-rw-r--r--  arch/mn10300/include/asm/ptrace.h | 13
-rw-r--r--  arch/mn10300/include/asm/reset-regs.h | 2
-rw-r--r--  arch/mn10300/include/asm/rtc.h | 11
-rw-r--r--  arch/mn10300/include/asm/rwlock.h | 125
-rw-r--r--  arch/mn10300/include/asm/serial-regs.h | 51
-rw-r--r--  arch/mn10300/include/asm/serial.h | 8
-rw-r--r--  arch/mn10300/include/asm/smp.h | 91
-rw-r--r--  arch/mn10300/include/asm/smsc911x.h | 1
-rw-r--r--  arch/mn10300/include/asm/spinlock.h | 179
-rw-r--r--  arch/mn10300/include/asm/spinlock_types.h | 20
-rw-r--r--  arch/mn10300/include/asm/system.h | 73
-rw-r--r--  arch/mn10300/include/asm/thread_info.h | 18
-rw-r--r--  arch/mn10300/include/asm/timer-regs.h | 191
-rw-r--r--  arch/mn10300/include/asm/timex.h | 20
-rw-r--r--  arch/mn10300/include/asm/tlbflush.h | 174
-rw-r--r--  arch/mn10300/include/asm/uaccess.h | 6
38 files changed, 1792 insertions, 524 deletions
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index f0cc1f84a72f..92d2f9298e38 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " mov %5,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+ : "memory", "cc");
+
+ return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " cmp %5,%1 \n"
+ " bne 2f \n"
+ " mov %6,(_ADR,%3) \n"
+ "2: mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+ "r"(old), "r"(new)
+ : "memory", "cc");
+
+ return oldval;
+}
+#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ *m = val;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ if (oldval == old)
+ *m = new;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
+ (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " add %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval += i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " sub %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval -= i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+ atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+ atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " and %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ mask = ~mask;
+ flags = arch_local_cli_save();
+ *addr &= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " or %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ *addr |= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
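
The atomic_add_unless() macro added above is built on a compare-and-swap retry loop: read the counter, then keep attempting atomic_cmpxchg() until either the forbidden value @u is seen or the swap succeeds. A minimal sketch of that pattern in plain C follows; my_cmpxchg() and my_add_unless() are illustrative stand-ins, not the kernel primitives, and are not themselves atomic -- they only show the control flow.

/* Sketch only: models the retry loop used by atomic_add_unless() above. */
#include <stdio.h>

static int my_cmpxchg(int *ptr, int old, int new_val)
{
	int cur = *ptr;			/* the kernel primitive does this atomically */

	if (cur == old)
		*ptr = new_val;
	return cur;			/* caller compares this with 'old' to see if it won */
}

/* Add 'a' to '*v' unless '*v' == 'u'; returns non-zero if the add happened. */
static int my_add_unless(int *v, int a, int u)
{
	int c = *v, old;

	while (c != u && (old = my_cmpxchg(v, c, c + a)) != c)
		c = old;		/* lost a race: retry with the value actually seen */
	return c != u;
}

int main(void)
{
	int counter = 1;

	printf("added=%d counter=%d\n", my_add_unless(&counter, 1, 0), counter);	/* adds */
	counter = 0;
	printf("added=%d counter=%d\n", my_add_unless(&counter, 1, 0), counter);	/* refuses */
	return 0;
}
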
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 3f50e9661076..3b8a868188f5 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -57,7 +57,7 @@
 #define clear_bit(nr, addr) ___clear_bit((nr), (addr))
 
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
 unsigned int *a = (unsigned int *) addr;
 int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
 /*
 * test bit
 */
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
 {
- return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
 
 /*
 * change bit
 */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
 int mask;
 unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
 *a ^= mask;
 }
 
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
 
 /*
 * test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
 /*
 * test and change bit
 */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 int mask, retval;
 unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
 return retval;
 }
 
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
 
 #include <asm-generic/bitops/lock.h>
 
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 781bf613366d..f29cde2cfc91 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -43,14 +43,18 @@
 
 /* instruction cache access registers */
 #define ICACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define ICACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
 
-/* instruction cache access registers */
+/* data cache access registers */
 #define DCACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define DCACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
 
 #endif /* _ASM_CACHE_H */
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f7f030..faed90240ded 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,66 +17,55 @@
 #include <linux/mm.h>
 
 /*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
 */
-#define flush_cache_all() do {} while (0)
-#define flush_cache_mm(mm) do {} while (0)
-#define flush_cache_dup_mm(mm) do {} while (0)
-#define flush_cache_range(mm, start, end) do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
-#define flush_cache_vmap(start, end) do {} while (0)
-#define flush_cache_vunmap(start, end) do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end) do {} while (0)
-#define flush_icache_page(vma, pg) do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
- flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_page(vma, page); \
- } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
 #ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 #else
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+ mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+ mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+ mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+ mn10300_local_dcache_inv_range2(start, size)
 #define mn10300_dcache_flush() do {} while (0)
 #define mn10300_dcache_flush_page(start) do {} while (0)
 #define mn10300_dcache_flush_range(start, end) do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 mn10300_dcache_inv_range2((start), (size))
 #endif /* CONFIG_MN10300_CACHE_WBACK */
 #else
+#define mn10300_local_icache_inv() do {} while (0)
+#define mn10300_local_icache_inv_page(start) do {} while (0)
+#define mn10300_local_icache_inv_range(start, end) do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_inv() do {} while (0)
+#define mn10300_local_dcache_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_inv() do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end)do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
 #define mn10300_icache_inv() do {} while (0)
+#define mn10300_icache_inv_page(start) do {} while (0)
+#define mn10300_icache_inv_range(start, end) do {} while (0)
+#define mn10300_icache_inv_range2(start, size) do {} while (0)
 #define mn10300_dcache_inv() do {} while (0)
 #define mn10300_dcache_inv_page(start) do {} while (0)
 #define mn10300_dcache_inv_range(start, end) do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 #define mn10300_dcache_flush_page(start) do {} while (0)
 #define mn10300_dcache_flush_range(start, end) do {} while (0)
 #define mn10300_dcache_flush_range2(start, size) do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all() do {} while (0)
+#define flush_cache_mm(mm) do {} while (0)
+#define flush_cache_dup_mm(mm) do {} while (0)
+#define flush_cache_range(mm, start, end) do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
+#define flush_cache_vmap(start, end) do {} while (0)
+#define flush_cache_vunmap(start, end) do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do {} while (0)
+#define flush_dcache_mmap_lock(mapping) do {} while (0)
+#define flush_dcache_mmap_unlock(mapping) do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+ mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end) do {} while (0)
+#define flush_icache_page(vma, pg) do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+ flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_page(vma, page); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
 
 /*
- * internal debugging function
+ * Internal debugging function
 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern void kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5388ea..90ed4a365c97 100644
--- a/arch/mn10300/include/asm/cpu-regs.h
+++ b/arch/mn10300/include/asm/cpu-regs.h
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #endif
 
-#ifdef CONFIG_MN10300_CPU_AM33V2
 /* we tell the compiler to pretend to be AM33 so that it doesn't try and use
 * the FP regs, but tell the assembler that we're actually allowed AM33v2
 * instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
 #else
 .am33_2
 #endif
-#endif
 
 #ifdef __KERNEL__
 
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
 #define EPSW_nAR 0x00040000 /* register bank control */
 #define EPSW_ML 0x00080000 /* monitor level */
 #define EPSW_FE 0x00100000 /* FPU enable */
+#define EPSW_IM_SHIFT 8 /* EPSW_IM_SHIFT determines the interrupt mode */
+
+#define NUM2EPSW_IM(num) ((num) << EPSW_IM_SHIFT)
 
 /* FPU registers */
 #define FPCR_EF_I 0x00000001 /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
 #define CPUREV __SYSREGC(0xc0000050, u32) /* CPU revision register */
 #define CPUREV_TYPE 0x0000000f /* CPU type */
 #define CPUREV_TYPE_S 0
-#define CPUREV_TYPE_AM33V1 0x00000000 /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2 0x00000001 /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1 0x00000002 /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1 0x00000000 /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2 0x00000001 /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1 0x00000002 /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3 0x00000003 /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2 0x00000004 /* - AM34-2 core, AM33/3.00 arch */
 #define CPUREV_REVISION 0x000000f0 /* CPU revision */
 #define CPUREV_REVISION_S 4
 #define CPUREV_ICWAY 0x00000f00 /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
 #define CHCTR_ICWMD 0x0f00 /* instruction cache way mode */
 #define CHCTR_DCWMD 0xf000 /* data cache way mode */
 
+#ifdef CONFIG_AM34_2
+#define ICIVCR __SYSREG(0xc0000c00, u32) /* icache area invalidate control */
+#define ICIVCR_ICIVBSY 0x00000008 /* icache area invalidate busy */
+#define ICIVCR_ICI 0x00000001 /* icache area invalidate */
+
+#define ICIVMR __SYSREG(0xc0000c04, u32) /* icache area invalidate mask */
+
+#define DCPGCR __SYSREG(0xc0000c10, u32) /* data cache area purge control */
+#define DCPGCR_DCPGBSY 0x00000008 /* data cache area purge busy */
+#define DCPGCR_DCP 0x00000002 /* data cache area purge */
+#define DCPGCR_DCI 0x00000001 /* data cache area invalidate */
+
+#define DCPGMR __SYSREG(0xc0000c14, u32) /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
 /* MMU control registers */
 #define MMUCTR __SYSREG(0xc0000090, u32) /* MMU control register */
 #define MMUCTR_IRP 0x0000003f /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
 #define MMUCTR_DTL_LOCK0_3 0x03000000 /* - entry 0-3 locked */
 #define MMUCTR_DTL_LOCK0_7 0x04000000 /* - entry 0-7 locked */
 #define MMUCTR_DTL_LOCK0_15 0x05000000 /* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE 0x80000000 /* write-through cache TLB entry bit enable */
+#endif
 
 #define PIDR __SYSREG(0xc0000094, u16) /* PID register */
 #define PIDR_PID 0x00ff /* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
 #define xPTEL_PS_4Mb 0x00000c00 /* - 4Mb page */
 #define xPTEL_PPN 0xfffff006 /* physical page number */
 
-#define xPTEL_V_BIT 0 /* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT 1
-#define xPTEL_UNUSED2_BIT 2
-#define xPTEL_C_BIT 3
-#define xPTEL_PV_BIT 4
-#define xPTEL_D_BIT 5
-#define xPTEL_G_BIT 9
-
 #define IPTEU __SYSREG(0xc00000a4, u32) /* instruction TLB virtual addr */
 #define DPTEU __SYSREG(0xc00000b4, u32) /* data TLB virtual addr */
 #define xPTEU_VPN 0xfffffc00 /* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
 #define xPTEL2_PS_128Kb 0x00000100 /* - 128Kb page */
 #define xPTEL2_PS_1Kb 0x00000200 /* - 1Kb page */
 #define xPTEL2_PS_4Mb 0x00000300 /* - 4Mb page */
-#define xPTEL2_PPN 0xfffffc00 /* physical page number */
+#define xPTEL2_CWT 0x00000400 /* cacheable write-through */
+#define xPTEL2_UNUSED1 0x00000800 /* unused bit (broadcast mask) */
+#define xPTEL2_PPN 0xfffff000 /* physical page number */
+
+#define xPTEL2_V_BIT 0 /* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT 1
+#define xPTEL2_PV_BIT 2
+#define xPTEL2_D_BIT 3
+#define xPTEL2_G_BIT 7
+#define xPTEL2_UNUSED1_BIT 11
 
 #define MMUFCR __SYSREGC(0xc000009c, u32) /* MMU exception cause */
 #define MMUFCR_IFC __SYSREGC(0xc000009c, u16) /* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
 #define MMUFCR_xFC_PR_RWK_RWU 0x01c0 /* - R/W kernel and R/W user */
 #define MMUFCR_xFC_ILLADDR 0x0200 /* illegal address excep flag */
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR __SYSREG(0xc0000a00, u32) /* cacheable address */
+#define AAR2 __SYSREG(0xc0000a04, u32) /* uncacheable address */
+#define ADR __SYSREG(0xc0000a08, u32) /* data */
+#define ASR __SYSREG(0xc0000a0c, u32) /* status */
+#define AARU __SYSREG(0xd400aa00, u32) /* user address */
+#define ADRU __SYSREG(0xd400aa08, u32) /* user data */
+#define ASRU __SYSREG(0xd400aa0c, u32) /* user status */
+
+#define ASR_RW 0x00000008 /* read */
+#define ASR_BW 0x00000004 /* bus error */
+#define ASR_IW 0x00000002 /* interrupt */
+#define ASR_LW 0x00000001 /* bus lock */
+
+#define ASRU_RW ASR_RW /* read */
+#define ASRU_BW ASR_BW /* bus error */
+#define ASRU_IW ASR_IW /* interrupt */
+#define ASRU_LW ASR_LW /* bus lock */
+
+/* in inline ASM, we stick the base pointer in to a reg and use offsets from
+ * it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+ "_AAR = 0\n"
+ "_AAR2 = 4\n"
+ "_ADR = 8\n"
+ "_ASR = 12\n");
+#else
+#define _AAR 0
+#define _AAR2 4
+#define _ADR 8
+#define _ASR 12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR 0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_CPU_REGS_H */
diff --git a/arch/mn10300/include/asm/dmactl-regs.h b/arch/mn10300/include/asm/dmactl-regs.h
index 58a199da0f4a..80337b339c90 100644
--- a/arch/mn10300/include/asm/dmactl-regs.h
+++ b/arch/mn10300/include/asm/dmactl-regs.h
@@ -11,91 +11,6 @@
 #ifndef _ASM_DMACTL_REGS_H
 #define _ASM_DMACTL_REGS_H
 
-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define DMxCTR(N) __SYSREG(0xd2000000 + ((N) * 0x100), u32) /* control reg */
-#define DMxCTR_BG 0x0000001f /* transfer request source */
-#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
-#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
-#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
-#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
-#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
-#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
-#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
-#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
-#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
-#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
-#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
-#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
-#define DMxCTR_BG_AFE 0x0000000d /* - analogue front-end interrupt source */
-#define DMxCTR_BG_ADC 0x0000000e /* - A/D conversion end interrupt source */
-#define DMxCTR_BG_IRDA 0x0000000f /* - IrDA interrupt source */
-#define DMxCTR_BG_RTC 0x00000010 /* - RTC interrupt source */
-#define DMxCTR_BG_XIRQ0 0x00000011 /* - XIRQ0 pin interrupt source */
-#define DMxCTR_BG_XIRQ1 0x00000012 /* - XIRQ1 pin interrupt source */
-#define DMxCTR_BG_XDMR0 0x00000013 /* - external request 0 source (XDMR0 pin) */
-#define DMxCTR_BG_XDMR1 0x00000014 /* - external request 1 source (XDMR1 pin) */
-#define DMxCTR_SAM 0x000000e0 /* DMA transfer src addr mode */
-#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
-#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
-#define DMxCTR_DAM 0x00000000 /* DMA transfer dest addr mode */
-#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
-#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
-#define DMxCTR_TM 0x00001800 /* DMA transfer mode */
-#define DMxCTR_TM_BATCH 0x00000000 /* - batch transfer */
-#define DMxCTR_TM_INTERM 0x00001000 /* - intermittent transfer */
-#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
-#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
-#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
-#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
-#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
-#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
-#define DMxCTR_RQM 0x00060000 /* external request input source mode */
-#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
-#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
-#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
-#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
-#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
-#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
-
-#define DMxSRC(N) __SYSREG(0xd2000004 + ((N) * 0x100), u32) /* control reg */
-
-#define DMxDST(N) __SYSREG(0xd2000008 + ((N) * 0x100), u32) /* src addr reg */
-
-#define DMxSIZ(N) __SYSREG(0xd200000c + ((N) * 0x100), u32) /* dest addr reg */
-#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
-
-#define DMxCYC(N) __SYSREG(0xd2000010 + ((N) * 0x100), u32) /* intermittent
- * size reg */
-#define DMxCYC_CYC 0x000000ff /* number of interrmittent transfers -1 */
-
-#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
-#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
-#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
-#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
-
-#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
-#define DM1ICR GxICR(DM0IR1) /* DMA channel 1 complete intr ctrl reg */
-#define DM2ICR GxICR(DM0IR2) /* DMA channel 2 complete intr ctrl reg */
-#define DM3ICR GxICR(DM0IR3) /* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
- u32 ctr;
- const void *src;
- void *dst;
- u32 siz;
- u32 cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>
 
 #endif /* _ASM_DMACTL_REGS_H */
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index e5fa97cd9a14..8157c9267f42 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -32,6 +32,12 @@
 #define R_MN10300_ALIGN 34 /* Alignment requirement. */
 
 /*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT 1 /* Has AM34 Atomic Operations */
+
+
+/*
 * ELF register definitions..
 */
 typedef unsigned long elf_greg_t;
@@ -47,8 +53,6 @@ typedef struct {
 u_int32_t fpcr;
 } elf_fpregset_t;
 
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
 /*
 * This is used to ensure we don't load something for the wrong architecture
 */
@@ -130,7 +134,11 @@ do { \
 * instruction set this CPU supports. This could be done in user space,
 * but it's not easy, and we've already done it here.
 */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP (HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
 #define ELF_HWCAP (0)
+#endif
 
 /*
 * This yields a string that ld.so will use to load implementation
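
The ELF_HWCAP hunk above is how the kernel advertises the AM34 atomic-operations unit to userspace: HWCAP_MN10300_ATOMIC_OP_UNIT (value 1, from the hunk) is exposed through the ELF auxiliary vector. A hedged sketch of a userspace check follows; getauxval() is the later glibc interface to AT_HWCAP and is used here purely as an illustration of the mechanism, not as part of this commit.

/* Sketch only: test the HWCAP bit exported by the elf.h change above. */
#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_MN10300_ATOMIC_OP_UNIT 1	/* value taken from the hunk above */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_MN10300_ATOMIC_OP_UNIT)
		printf("atomic operations unit available\n");
	else
		printf("no atomic operations unit; use other primitives\n");
	return 0;
}
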
diff --git a/arch/mn10300/include/asm/exceptions.h b/arch/mn10300/include/asm/exceptions.h
index fa16466ef3f9..ca3e20508c77 100644
--- a/arch/mn10300/include/asm/exceptions.h
+++ b/arch/mn10300/include/asm/exceptions.h
@@ -15,8 +15,8 @@
 
 /*
 * define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- * (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ * the same time.
 */
 #define GDBSTUB_BKPT 0xFF
 
@@ -90,7 +90,6 @@ enum exception_code {
 
 extern void __set_intr_stub(enum exception_code code, void *handler);
 extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);
 
 struct pt_regs;
 
@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
 extern asmlinkage void raw_bus_error(void);
 extern asmlinkage void double_fault(void);
 extern asmlinkage int system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
 extern asmlinkage void uninitialised_exception(struct pt_regs *,
 enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
 
 extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
 
+#define NUM2EXCEP_IRQ_LEVEL(num) (EXCEP_IRQ_LEVEL0 + (num) * 8)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_EXCEPTIONS_H */
diff --git a/arch/mn10300/include/asm/fpu.h b/arch/mn10300/include/asm/fpu.h
index 64a2b83a7a6a..b7625de8eade 100644
--- a/arch/mn10300/include/asm/fpu.h
+++ b/arch/mn10300/include/asm/fpu.h
@@ -12,74 +12,125 @@
 #ifndef _ASM_FPU_H
 #define _ASM_FPU_H
 
-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
 #include <asm/sigcontext.h>
-#include <asm/user.h>
 
 #ifdef __KERNEL__
 
-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
 extern struct task_struct *fpu_state_owner;
+#endif
 
-#define set_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags |= THREAD_USING_FPU; \
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif
 
-#define clear_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags &= ~THREAD_USING_FPU; \
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bset %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
 
-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bclr %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
 
-#define unlazy_fpu(tsk) \
-do { \
- preempt_disable(); \
- if (fpu_state_owner == (tsk)) \
- fpu_save(&tsk->thread.fpu_state); \
- preempt_enable(); \
-} while (0)
-
-#define exit_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) \
- fpu_state_owner = NULL; \
- preempt_enable(); \
-} while (0)
-
-#define flush_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) { \
- fpu_state_owner = NULL; \
- __tsk->thread.uregs->epsw &= ~EPSW_FE; \
- } \
- preempt_enable(); \
- clear_using_fpu(__tsk); \
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
 
-extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
 extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU */
-
-/*
- * signal frame handlers
- */
 extern int fpu_setup_sigcontext(struct fpucontext *buf);
 extern int fpu_restore_sigcontext(struct fpucontext *buf);
 
+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ fpu_save(&tsk->thread.fpu_state);
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk)
+ fpu_save(&tsk->thread.fpu_state);
+#endif
+ preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+ if (fpu_state_owner == tsk)
+ fpu_state_owner = NULL;
+ preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk) {
+ fpu_state_owner = NULL;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#endif
+ preempt_enable();
+ clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU */
+
 #endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_FPU_H */
diff --git a/arch/mn10300/include/asm/frame.inc b/arch/mn10300/include/asm/frame.inc
index 5b1949bdf039..2ee58e3eb6b3 100644
--- a/arch/mn10300/include/asm/frame.inc
+++ b/arch/mn10300/include/asm/frame.inc
@@ -18,6 +18,7 @@
 #ifndef __ASM_OFFSETS_H__
 #include <asm/asm-offsets.h>
 #endif
+#include <asm/thread_info.h>
 
 #define pi break
 
@@ -37,11 +38,15 @@
 movm [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
 mov sp,fp # FRAME pointer in A3
 add -12,sp # allow for calls to be made
- mov (__frame),a1
- mov a1,(REG_NEXT,fp)
- mov fp,(__frame)
 
- and ~EPSW_FE,epsw # disable the FPU inside the kernel
+ # push the exception frame onto the front of the list
+ GET_THREAD_INFO a1
+ mov (TI_frame,a1),a0
+ mov a0,(REG_NEXT,fp)
+ mov fp,(TI_frame,a1)
+
+ # disable the FPU inside the kernel
+ and ~EPSW_FE,epsw
 
 # we may be holding current in E2
 #ifdef CONFIG_MN10300_CURRENT_IN_E2
@@ -57,10 +62,11 @@
 .macro RESTORE_ALL
 # peel back the stack to the calling frame
 # - this permits execve() to discard extra frames due to kernel syscalls
- mov (__frame),fp
+ GET_THREAD_INFO a0
+ mov (TI_frame,a0),fp
 mov fp,sp
- mov (REG_NEXT,fp),d0 # userspace has regs->next == 0
- mov d0,(__frame)
+ mov (REG_NEXT,fp),d0
+ mov d0,(TI_frame,a0) # userspace has regs->next == 0
 
 #ifndef CONFIG_MN10300_USING_JTAG
 mov (REG_EPSW,fp),d0
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index 41ed26763964..f5495ad82b77 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void __gdbstub_bug_trap(void);
 extern asmlinkage void __gdbstub_pause(void);
 
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 extern asmlinkage void gdbstub_purge_cache(void);
 #else
 #define gdbstub_purge_cache() do {} while (0)
diff --git a/arch/mn10300/include/asm/hardirq.h b/arch/mn10300/include/asm/hardirq.h
index 54d950117674..0000d650b55f 100644
--- a/arch/mn10300/include/asm/hardirq.h
+++ b/arch/mn10300/include/asm/hardirq.h
@@ -19,9 +19,10 @@
 /* assembly code in softirq.h is sensitive to the offsets of these fields */
 typedef struct {
 unsigned int __softirq_pending;
- unsigned long idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
 unsigned int __nmi_count; /* arch dependent */
 unsigned int __irq_count; /* arch dependent */
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index e2155e686451..bfe2d88604d9 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
 BUG();
 #endif
 set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
 
 return vaddr;
 }
@@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
 * this pte without first remap it
 */
 pte_clear(kmap_pte - idx);
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
 }
 #endif
 
diff --git a/arch/mn10300/include/asm/intctl-regs.h b/arch/mn10300/include/asm/intctl-regs.h
index ba544c796c5a..585b708c2bc0 100644
--- a/arch/mn10300/include/asm/intctl-regs.h
+++ b/arch/mn10300/include/asm/intctl-regs.h
@@ -15,24 +15,19 @@
15 15
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17 17
18/* interrupt controller registers */ 18/*
19#define GxICR(X) __SYSREG(0xd4000000 + (X) * 4, u16) /* group irq ctrl regs */ 19 * Interrupt controller registers
20 20 * - Registers 64-191 are at addresses offset from the main array
21#define IAGR __SYSREG(0xd4000100, u16) /* intr acceptance group reg */ 21 */
22#define IAGR_GN 0x00fc /* group number register 22#define GxICR(X) \
23 * (documentation _has_ to be wrong) 23 __SYSREG(0xd4000000 + (X) * 4 + \
24 */ 24 (((X) >= 64) && ((X) < 192)) * 0xf00, u16)
25 25
26#define EXTMD __SYSREG(0xd4000200, u16) /* external pin intr spec reg */ 26#define GxICR_u8(X) \
27#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3) 27 __SYSREG(0xd4000000 + (X) * 4 + \
28 (((X) >= 64) && ((X) < 192)) * 0xf00, u8)
28 29
29#define SET_XIRQ_TRIGGER(X,Y) \ 30#include <proc/intctl-regs.h>
30do { \
31 u16 x = EXTMD; \
32 x &= ~(3 << ((X) * 2)); \
33 x |= ((Y) & 3) << ((X) * 2); \
34 EXTMD = x; \
35} while (0)
36 31
37#define XIRQ_TRIGGER_LOWLEVEL 0 32#define XIRQ_TRIGGER_LOWLEVEL 0
38#define XIRQ_TRIGGER_HILEVEL 1 33#define XIRQ_TRIGGER_HILEVEL 1
@@ -59,10 +54,18 @@ do { \
59#define GxICR_LEVEL_5 0x5000 /* - level 5 */ 54#define GxICR_LEVEL_5 0x5000 /* - level 5 */
60#define GxICR_LEVEL_6 0x6000 /* - level 6 */ 55#define GxICR_LEVEL_6 0x6000 /* - level 6 */
61#define GxICR_LEVEL_SHIFT 12 56#define GxICR_LEVEL_SHIFT 12
57#define GxICR_NMI 0x8000 /* nmi request flag */
58
59#define NUM2GxICR_LEVEL(num) ((num) << GxICR_LEVEL_SHIFT)
62 60
63#ifndef __ASSEMBLY__ 61#ifndef __ASSEMBLY__
64extern void set_intr_level(int irq, u16 level); 62extern void set_intr_level(int irq, u16 level);
65extern void set_intr_postackable(int irq); 63extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
64extern void mn10300_intc_clear(unsigned int irq);
65extern void mn10300_intc_set(unsigned int irq);
66extern void mn10300_intc_enable(unsigned int irq);
67extern void mn10300_intc_disable(unsigned int irq);
68extern void mn10300_set_lateack_irq_type(int irq);
66#endif 69#endif
67 70
68/* external interrupts */ 71/* external interrupts */
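
Note on the reworked GxICR() macro above: each IRQ gets a 16-bit control register at 0xd4000000 + irq * 4, and IRQs 64-191 are displaced by a further 0xf00 bytes. A rough host-side sanity check of that arithmetic is sketched below in plain C; nothing in it is MN10300-specific and the sample IRQ numbers are arbitrary.

#include <stdio.h>
#include <stdint.h>

/* Mirror of the GxICR() address arithmetic: 4 bytes per IRQ from the
 * 0xd4000000 base, with IRQs 64-191 displaced by a further 0xf00. */
static uint32_t gxicr_address(unsigned int irq)
{
	uint32_t addr = 0xd4000000u + irq * 4;

	if (irq >= 64 && irq < 192)
		addr += 0xf00;
	return addr;
}

int main(void)
{
	unsigned int samples[] = { 0, 63, 64, 191, 192 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("GxICR(%3u) -> 0x%08lx\n", samples[i],
		       (unsigned long)gxicr_address(samples[i]));
	return 0;
}
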
diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h
index c1a4119e6497..787255da744e 100644
--- a/arch/mn10300/include/asm/io.h
+++ b/arch/mn10300/include/asm/io.h
@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
206#define iowrite32_rep(p, src, count) \ 206#define iowrite32_rep(p, src, count) \
207 outsl((unsigned long) (p), (src), (count)) 207 outsl((unsigned long) (p), (src), (count))
208 208
209#define readsb(p, dst, count) \
210 insb((unsigned long) (p), (dst), (count))
211#define readsw(p, dst, count) \
212 insw((unsigned long) (p), (dst), (count))
213#define readsl(p, dst, count) \
214 insl((unsigned long) (p), (dst), (count))
215
216#define writesb(p, src, count) \
217 outsb((unsigned long) (p), (src), (count))
218#define writesw(p, src, count) \
219 outsw((unsigned long) (p), (src), (count))
220#define writesl(p, src, count) \
221 outsl((unsigned long) (p), (src), (count))
209 222
210#define IO_SPACE_LIMIT 0xffffffff 223#define IO_SPACE_LIMIT 0xffffffff
211 224
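
The added reads*/writes* macros above simply forward to the ins*/outs* string accessors, treating the MMIO cookie as a port address. For readers unfamiliar with the string accessors, the sketch below models what readsb() behaves like: it repeatedly reads one fixed location into an advancing buffer. fake_reg is purely a stand-in for a device register, not real hardware.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustration of readsb()-style behaviour: read 'count' bytes from one
 * fixed location into an advancing buffer.  The source address does not
 * move between reads, which is what a FIFO-style register wants. */
static void readsb_like(volatile const uint8_t *reg, void *dst, size_t count)
{
	uint8_t *p = dst;

	while (count--)
		*p++ = *reg;
}

int main(void)
{
	volatile uint8_t fake_reg = 0x5a;	/* stand-in for a device register */
	uint8_t buf[4];

	readsb_like(&fake_reg, buf, sizeof(buf));
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
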
diff --git a/arch/mn10300/include/asm/irq.h b/arch/mn10300/include/asm/irq.h
index 25c045d16d1c..1a73fb3f60c6 100644
--- a/arch/mn10300/include/asm/irq.h
+++ b/arch/mn10300/include/asm/irq.h
@@ -21,8 +21,16 @@
21/* this number is used when no interrupt has been assigned */ 21/* this number is used when no interrupt has been assigned */
22#define NO_IRQ INT_MAX 22#define NO_IRQ INT_MAX
23 23
24/* hardware irq numbers */ 24/*
25#define NR_IRQS GxICR_NUM_IRQS 25 * hardware irq numbers
26 * - the ASB2364 has an FPGA with an IRQ multiplexer on it
27 */
28#ifdef CONFIG_MN10300_UNIT_ASB2364
29#include <unit/irq.h>
30#else
31#define NR_CPU_IRQS GxICR_NUM_IRQS
32#define NR_IRQS NR_CPU_IRQS
33#endif
26 34
27/* external hardware irq numbers */ 35/* external hardware irq numbers */
28#define NR_XIRQS GxICR_NUM_XIRQS 36#define NR_XIRQS GxICR_NUM_XIRQS
diff --git a/arch/mn10300/include/asm/irq_regs.h b/arch/mn10300/include/asm/irq_regs.h
index a848cd232eb4..97d0cb5af807 100644
--- a/arch/mn10300/include/asm/irq_regs.h
+++ b/arch/mn10300/include/asm/irq_regs.h
@@ -18,7 +18,11 @@
18#define ARCH_HAS_OWN_IRQ_REGS 18#define ARCH_HAS_OWN_IRQ_REGS
19 19
20#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
21#define get_irq_regs() (__frame) 21static inline __attribute__((const))
22struct pt_regs *get_irq_regs(void)
23{
24 return current_frame();
25}
22#endif 26#endif
23 27
24#endif /* _ASM_IRQ_REGS_H */ 28#endif /* _ASM_IRQ_REGS_H */
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
index 5e529a117cb2..7a7ae12c7119 100644
--- a/arch/mn10300/include/asm/irqflags.h
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -13,6 +13,9 @@
13#define _ASM_IRQFLAGS_H 13#define _ASM_IRQFLAGS_H
14 14
15#include <asm/cpu-regs.h> 15#include <asm/cpu-regs.h>
16#ifndef __ASSEMBLY__
17#include <linux/smp.h>
18#endif
16 19
17/* 20/*
18 * interrupt control 21 * interrupt control
@@ -23,11 +26,7 @@
23 * - level 6 - timer interrupt 26 * - level 6 - timer interrupt
24 * - "enabled": run in IM7 27 * - "enabled": run in IM7
25 */ 28 */
26#ifdef CONFIG_MN10300_TTYSM 29#define MN10300_CLI_LEVEL (CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
27#define MN10300_CLI_LEVEL EPSW_IM_2
28#else
29#define MN10300_CLI_LEVEL EPSW_IM_1
30#endif
31 30
32#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
33 32
@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
64/* 63/*
65 * we make sure arch_irq_enable() doesn't cause priority inversion 64 * we make sure arch_irq_enable() doesn't cause priority inversion
66 */ 65 */
67extern unsigned long __mn10300_irq_enabled_epsw; 66extern unsigned long __mn10300_irq_enabled_epsw[];
68 67
69static inline void arch_local_irq_enable(void) 68static inline void arch_local_irq_enable(void)
70{ 69{
71 unsigned long tmp; 70 unsigned long tmp;
71 int cpu = raw_smp_processor_id();
72 72
73 asm volatile( 73 asm volatile(
74 " mov epsw,%0 \n" 74 " mov epsw,%0 \n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
76 " or %2,%0 \n" 76 " or %2,%0 \n"
77 " mov %0,epsw \n" 77 " mov %0,epsw \n"
78 : "=&d"(tmp) 78 : "=&d"(tmp)
79 : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) 79 : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
80 : "memory"); 80 : "memory", "cc");
81} 81}
82 82
83static inline void arch_local_irq_restore(unsigned long flags) 83static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
94 94
95static inline bool arch_irqs_disabled_flags(unsigned long flags) 95static inline bool arch_irqs_disabled_flags(unsigned long flags)
96{ 96{
97 return (flags & EPSW_IM) <= MN10300_CLI_LEVEL; 97 return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
98} 98}
99 99
100static inline bool arch_irqs_disabled(void) 100static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
109 */ 109 */
110static inline void arch_safe_halt(void) 110static inline void arch_safe_halt(void)
111{ 111{
112#ifdef CONFIG_SMP
113 arch_local_irq_enable();
114#else
112 asm volatile( 115 asm volatile(
113 " or %0,epsw \n" 116 " or %0,epsw \n"
114 " nop \n" 117 " nop \n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
117 : 120 :
118 : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP) 121 : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
119 : "cc"); 122 : "cc");
123#endif
120} 124}
121 125
126#define __sleep_cpu() \
127do { \
128 asm volatile( \
129 " bset %1,(%0)\n" \
130 "1: btst %1,(%0)\n" \
131 " bne 1b\n" \
132 : \
133 : "i"(&CPUM), "i"(CPUM_SLEEP) \
134 : "cc" \
135 ); \
136} while (0)
137
138static inline void arch_local_cli(void)
139{
140 asm volatile(
141 " and %0,epsw \n"
142 " nop \n"
143 " nop \n"
144 " nop \n"
145 :
146 : "i"(~EPSW_IE)
147 : "memory"
148 );
149}
150
151static inline unsigned long arch_local_cli_save(void)
152{
153 unsigned long flags = arch_local_save_flags();
154 arch_local_cli();
155 return flags;
156}
157
158static inline void arch_local_sti(void)
159{
160 asm volatile(
161 " or %0,epsw \n"
162 :
163 : "i"(EPSW_IE)
164 : "memory");
165}
166
167static inline void arch_local_change_intr_mask_level(unsigned long level)
168{
169 asm volatile(
170 " and %0,epsw \n"
171 " or %1,epsw \n"
172 :
173 : "i"(~EPSW_IM), "i"(EPSW_IE | level)
174 : "cc", "memory");
175}
176
177#else /* !__ASSEMBLY__ */
178
179#define LOCAL_SAVE_FLAGS(reg) \
180 mov epsw,reg
181
182#define LOCAL_IRQ_DISABLE \
183 and ~EPSW_IM,epsw; \
184 or EPSW_IE|MN10300_CLI_LEVEL,epsw; \
185 nop; \
186 nop; \
187 nop
188
189#define LOCAL_IRQ_ENABLE \
190 or EPSW_IE|EPSW_IM_7,epsw
191
192#define LOCAL_IRQ_RESTORE(reg) \
193 mov reg,epsw
194
195#define LOCAL_CLI_SAVE(reg) \
196 mov epsw,reg; \
197 and ~EPSW_IE,epsw; \
198 nop; \
199 nop; \
200 nop
201
202#define LOCAL_CLI \
203 and ~EPSW_IE,epsw; \
204 nop; \
205 nop; \
206 nop
207
208#define LOCAL_STI \
209 or EPSW_IE,epsw
210
211#define LOCAL_CHANGE_INTR_MASK_LEVEL(level) \
212 and ~EPSW_IM,epsw; \
213 or EPSW_IE|(level),epsw
214
122#endif /* __ASSEMBLY__ */ 215#endif /* __ASSEMBLY__ */
123#endif /* _ASM_IRQFLAGS_H */ 216#endif /* _ASM_IRQFLAGS_H */
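
The new arch_irqs_disabled_flags() above counts interrupts as enabled only when EPSW.IE is set and the mask level is 7, rather than comparing the level against MN10300_CLI_LEVEL. A small user-space model of that predicate follows; the X_EPSW_* constants are illustrative placeholders, not the real EPSW encoding.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative EPSW bit values - placeholders, not the real encoding. */
#define X_EPSW_IE	0x0800u		/* interrupt enable */
#define X_EPSW_IM	0x0700u		/* interrupt mask level field */
#define X_EPSW_IM_7	0x0700u		/* level 7: all priorities allowed */

/* The new test: IRQs count as enabled only with IE set and IM == 7. */
static bool irqs_disabled_flags(unsigned long flags)
{
	return (flags & (X_EPSW_IE | X_EPSW_IM)) != (X_EPSW_IE | X_EPSW_IM_7);
}

int main(void)
{
	printf("IE, IM7 : %d\n", irqs_disabled_flags(X_EPSW_IE | X_EPSW_IM_7)); /* 0 */
	printf("IE, IM2 : %d\n", irqs_disabled_flags(X_EPSW_IE | 0x0200u));     /* 1 */
	printf("IM7 only: %d\n", irqs_disabled_flags(X_EPSW_IM_7));             /* 1 */
	return 0;
}
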
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c244de3..c8f6c82672ad 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -27,28 +27,38 @@
27#include <asm/tlbflush.h> 27#include <asm/tlbflush.h>
28#include <asm-generic/mm_hooks.h> 28#include <asm-generic/mm_hooks.h>
29 29
30#define MMU_CONTEXT_TLBPID_NR 256
30#define MMU_CONTEXT_TLBPID_MASK 0x000000ffUL 31#define MMU_CONTEXT_TLBPID_MASK 0x000000ffUL
31#define MMU_CONTEXT_VERSION_MASK 0xffffff00UL 32#define MMU_CONTEXT_VERSION_MASK 0xffffff00UL
32#define MMU_CONTEXT_FIRST_VERSION 0x00000100UL 33#define MMU_CONTEXT_FIRST_VERSION 0x00000100UL
33#define MMU_NO_CONTEXT 0x00000000UL 34#define MMU_NO_CONTEXT 0x00000000UL
34 35#define MMU_CONTEXT_TLBPID_LOCK_NR 0
35extern unsigned long mmu_context_cache[NR_CPUS];
36#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
37 36
38#define enter_lazy_tlb(mm, tsk) do {} while (0) 37#define enter_lazy_tlb(mm, tsk) do {} while (0)
39 38
39static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
40{
40#ifdef CONFIG_SMP 41#ifdef CONFIG_SMP
41#define cpu_ran_vm(cpu, mm) \ 42 cpumask_set_cpu(cpu, mm_cpumask(mm));
42 cpumask_set_cpu((cpu), mm_cpumask(mm)) 43#endif
43#define cpu_maybe_ran_vm(cpu, mm) \ 44}
44 cpumask_test_and_set_cpu((cpu), mm_cpumask(mm)) 45
46static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
47{
48#ifdef CONFIG_SMP
49 return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
45#else 50#else
46#define cpu_ran_vm(cpu, mm) do {} while (0) 51 return true;
47#define cpu_maybe_ran_vm(cpu, mm) true 52#endif
48#endif /* CONFIG_SMP */ 53}
49 54
50/* 55#ifdef CONFIG_MN10300_TLB_USE_PIDR
51 * allocate an MMU context 56extern unsigned long mmu_context_cache[NR_CPUS];
57#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
58
59/**
60 * allocate_mmu_context - Allocate storage for the arch-specific MMU data
61 * @mm: The userspace VM context being set up
52 */ 62 */
53static inline unsigned long allocate_mmu_context(struct mm_struct *mm) 63static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
54{ 64{
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
58 if (!(mc & MMU_CONTEXT_TLBPID_MASK)) { 68 if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
59 /* we exhausted the TLB PIDs of this version on this CPU, so we 69 /* we exhausted the TLB PIDs of this version on this CPU, so we
60 * flush this CPU's TLB in its entirety and start new cycle */ 70 * flush this CPU's TLB in its entirety and start new cycle */
61 flush_tlb_all(); 71 local_flush_tlb_all();
62 72
63 /* fix the TLB version if needed (we avoid version #0 so as to 73 /* fix the TLB version if needed (we avoid version #0 so as to
64 * distingush MMU_NO_CONTEXT) */ 74 * distingush MMU_NO_CONTEXT) */
@@ -101,22 +111,34 @@ static inline int init_new_context(struct task_struct *tsk,
101} 111}
102 112
103/* 113/*
104 * destroy context related info for an mm_struct that is about to be put to
105 * rest
106 */
107#define destroy_context(mm) do { } while (0)
108
109/*
110 * after we have set current->mm to a new value, this activates the context for 114 * after we have set current->mm to a new value, this activates the context for
111 * the new mm so we see the new mappings. 115 * the new mm so we see the new mappings.
112 */ 116 */
113static inline void activate_context(struct mm_struct *mm, int cpu) 117static inline void activate_context(struct mm_struct *mm)
114{ 118{
115 PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK; 119 PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
116} 120}
121#else /* CONFIG_MN10300_TLB_USE_PIDR */
117 122
118/* 123#define init_new_context(tsk, mm) (0)
119 * change between virtual memory sets 124#define activate_context(mm) local_flush_tlb()
125
126#endif /* CONFIG_MN10300_TLB_USE_PIDR */
127
128/**
129 * destroy_context - Destroy mm context information
130 * @mm: The MM being destroyed.
131 *
132 * Destroy context related info for an mm_struct that is about to be put to
133 * rest
134 */
135#define destroy_context(mm) do {} while (0)
136
137/**
138 * switch_mm - Change between userspace virtual memory contexts
139 * @prev: The outgoing MM context.
140 * @next: The incoming MM context.
141 * @tsk: The incoming task.
120 */ 142 */
121static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 143static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
122 struct task_struct *tsk) 144 struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
124 int cpu = smp_processor_id(); 146 int cpu = smp_processor_id();
125 147
126 if (prev != next) { 148 if (prev != next) {
149#ifdef CONFIG_SMP
150 per_cpu(cpu_tlbstate, cpu).active_mm = next;
151#endif
127 cpu_ran_vm(cpu, next); 152 cpu_ran_vm(cpu, next);
128 activate_context(next, cpu);
129 PTBR = (unsigned long) next->pgd; 153 PTBR = (unsigned long) next->pgd;
130 } else if (!cpu_maybe_ran_vm(cpu, next)) { 154 activate_context(next);
131 activate_context(next, cpu);
132 } 155 }
133} 156}
134 157
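
The PIDR-based allocator above hands out 8-bit TLB PIDs per CPU and uses the upper 24 bits of mmu_context_cache as a generation counter, flushing the local TLB whenever the PID space wraps. Below is a single-CPU, user-space model of that scheme; local_flush_tlb_all() is stubbed out and the per-mm bookkeeping is omitted, so it only illustrates the wrap/flush logic.

#include <stdio.h>

#define TLBPID_MASK	0x000000ffUL
#define VERSION_MASK	0xffffff00UL
#define FIRST_VERSION	0x00000100UL

static unsigned long context_cache = FIRST_VERSION;	/* one CPU only */

static void local_flush_tlb_all(void)
{
	puts("-- PID space exhausted: flush local TLB, new generation --");
}

/* Hand out the next PID; when the 8-bit PID space wraps, flush the TLB
 * so entries tagged with recycled PIDs from the old generation can no
 * longer match, and keep the generation away from 0 (== no context). */
static unsigned long allocate_context(void)
{
	unsigned long mc = ++context_cache;

	if (!(mc & TLBPID_MASK)) {
		local_flush_tlb_all();
		if (!mc)
			context_cache = mc = FIRST_VERSION;
	}
	return mc;
}

int main(void)
{
	context_cache = FIRST_VERSION + 0xfd;	/* near the end of a cycle */
	for (int i = 0; i < 5; i++) {
		unsigned long mc = allocate_context();
		printf("context %#lx (generation %lu, PID %lu)\n",
		       mc, mc >> 8, mc & TLBPID_MASK);
	}
	return 0;
}
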
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index a19f11327cd8..146bacf193ea 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -11,7 +11,6 @@
11#ifndef _ASM_PGALLOC_H 11#ifndef _ASM_PGALLOC_H
12#define _ASM_PGALLOC_H 12#define _ASM_PGALLOC_H
13 13
14#include <asm/processor.h>
15#include <asm/page.h> 14#include <asm/page.h>
16#include <linux/threads.h> 15#include <linux/threads.h>
17#include <linux/mm.h> /* for struct page */ 16#include <linux/mm.h> /* for struct page */
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index b049a8bd1577..a1e894b5f65b 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
90 * The vmalloc() routines also leaves a hole of 4kB between each vmalloced 90 * The vmalloc() routines also leaves a hole of 4kB between each vmalloced
91 * area to catch addressing errors. 91 * area to catch addressing errors.
92 */ 92 */
93#ifndef __ASSEMBLY__
94#define VMALLOC_OFFSET (8UL * 1024 * 1024)
95#define VMALLOC_START (0x70000000UL)
96#define VMALLOC_END (0x7C000000UL)
97#else
93#define VMALLOC_OFFSET (8 * 1024 * 1024) 98#define VMALLOC_OFFSET (8 * 1024 * 1024)
94#define VMALLOC_START (0x70000000) 99#define VMALLOC_START (0x70000000)
95#define VMALLOC_END (0x7C000000) 100#define VMALLOC_END (0x7C000000)
101#endif
96 102
97#ifndef __ASSEMBLY__ 103#ifndef __ASSEMBLY__
98extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE]; 104extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
99#endif 105#endif
100 106
101/* IPTEL/DPTEL bit assignments */ 107/* IPTEL2/DPTEL2 bit assignments */
102#define _PAGE_BIT_VALID xPTEL_V_BIT 108#define _PAGE_BIT_VALID xPTEL2_V_BIT
103#define _PAGE_BIT_ACCESSED xPTEL_UNUSED1_BIT /* mustn't be loaded into IPTEL/DPTEL */ 109#define _PAGE_BIT_CACHE xPTEL2_C_BIT
104#define _PAGE_BIT_NX xPTEL_UNUSED2_BIT /* mustn't be loaded into IPTEL/DPTEL */ 110#define _PAGE_BIT_PRESENT xPTEL2_PV_BIT
105#define _PAGE_BIT_CACHE xPTEL_C_BIT 111#define _PAGE_BIT_DIRTY xPTEL2_D_BIT
106#define _PAGE_BIT_PRESENT xPTEL_PV_BIT 112#define _PAGE_BIT_GLOBAL xPTEL2_G_BIT
107#define _PAGE_BIT_DIRTY xPTEL_D_BIT 113#define _PAGE_BIT_ACCESSED xPTEL2_UNUSED1_BIT /* mustn't be loaded into IPTEL2/DPTEL2 */
108#define _PAGE_BIT_GLOBAL xPTEL_G_BIT 114
109 115#define _PAGE_VALID xPTEL2_V
110#define _PAGE_VALID xPTEL_V 116#define _PAGE_CACHE xPTEL2_C
111#define _PAGE_ACCESSED xPTEL_UNUSED1 117#define _PAGE_PRESENT xPTEL2_PV
112#define _PAGE_NX xPTEL_UNUSED2 /* no-execute bit */ 118#define _PAGE_DIRTY xPTEL2_D
113#define _PAGE_CACHE xPTEL_C 119#define _PAGE_PROT xPTEL2_PR
114#define _PAGE_PRESENT xPTEL_PV 120#define _PAGE_PROT_RKNU xPTEL2_PR_ROK
115#define _PAGE_DIRTY xPTEL_D 121#define _PAGE_PROT_WKNU xPTEL2_PR_RWK
116#define _PAGE_PROT xPTEL_PR 122#define _PAGE_PROT_RKRU xPTEL2_PR_ROK_ROU
117#define _PAGE_PROT_RKNU xPTEL_PR_ROK 123#define _PAGE_PROT_WKRU xPTEL2_PR_RWK_ROU
118#define _PAGE_PROT_WKNU xPTEL_PR_RWK 124#define _PAGE_PROT_WKWU xPTEL2_PR_RWK_RWU
119#define _PAGE_PROT_RKRU xPTEL_PR_ROK_ROU 125#define _PAGE_GLOBAL xPTEL2_G
120#define _PAGE_PROT_WKRU xPTEL_PR_RWK_ROU 126#define _PAGE_PS_MASK xPTEL2_PS
121#define _PAGE_PROT_WKWU xPTEL_PR_RWK_RWU 127#define _PAGE_PS_4Kb xPTEL2_PS_4Kb
122#define _PAGE_GLOBAL xPTEL_G 128#define _PAGE_PS_128Kb xPTEL2_PS_128Kb
123#define _PAGE_PSE xPTEL_PS_4Mb /* 4MB page */ 129#define _PAGE_PS_1Kb xPTEL2_PS_1Kb
124 130#define _PAGE_PS_4Mb xPTEL2_PS_4Mb
125#define _PAGE_FILE xPTEL_UNUSED1_BIT /* set:pagecache unset:swap */ 131#define _PAGE_PSE xPTEL2_PS_4Mb /* 4MB page */
126 132#define _PAGE_CACHE_WT xPTEL2_CWT
127#define __PAGE_PROT_UWAUX 0x040 133#define _PAGE_ACCESSED xPTEL2_UNUSED1
128#define __PAGE_PROT_USER 0x080 134#define _PAGE_NX 0 /* no-execute bit */
129#define __PAGE_PROT_WRITE 0x100 135
136/* If _PAGE_VALID is clear, we use these: */
137#define _PAGE_FILE xPTEL2_C /* set:pagecache unset:swap */
138#define _PAGE_PROTNONE 0x000 /* If not present */
139
140#define __PAGE_PROT_UWAUX 0x010
141#define __PAGE_PROT_USER 0x020
142#define __PAGE_PROT_WRITE 0x040
130 143
131#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID) 144#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID)
132#define _PAGE_PROTNONE 0x000 /* If not present */
133 145
134#ifndef __ASSEMBLY__ 146#ifndef __ASSEMBLY__
135 147
@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
170#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) 182#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
171#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) 183#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
172 184
185#define __PAGE_USERIO (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
186#define PAGE_USERIO __pgprot(__PAGE_USERIO)
187
173/* 188/*
174 * Whilst the MN10300 can do page protection for execute (given separate data 189 * Whilst the MN10300 can do page protection for execute (given separate data
175 * and insn TLBs), we are not supporting it at the moment. Write permission, 190 * and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
323 return 1; 338 return 1;
324} 339}
325 340
326/* 341#define PTE_FILE_MAX_BITS 30
327 * Bits 0 and 1 are taken, split up the 29 bits of offset
328 * into this range:
329 */
330#define PTE_FILE_MAX_BITS 29
331 342
332#define pte_to_pgoff(pte) (pte_val(pte) >> 2) 343#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
333#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE) 344#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
373 * Macro to mark a page protection value as "uncacheable". On processors which 384 * Macro to mark a page protection value as "uncacheable". On processors which
374 * do not support it, this is a no-op. 385 * do not support it, this is a no-op.
375 */ 386 */
376#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE) 387#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
377 388
389/*
390 * Macro to mark a page protection value as "Write-Through".
391 * On processors which do not support it, this is a no-op.
392 */
393#define pgprot_through(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
378 394
379/* 395/*
380 * Conversion functions: convert a page and protection to a page entry, 396 * Conversion functions: convert a page and protection to a page entry,
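
With the unused NX software bit gone, nonlinear-file PTEs can carry a 30-bit page offset: bits 0-1 stay reserved for flags and the offset occupies bits 2-31, which is what the new PTE_FILE_MAX_BITS value reflects. A quick round-trip of that packing is sketched below; PAGE_FILE_FLAG is an arbitrary illustrative value, not the real _PAGE_FILE bit.

#include <stdio.h>
#include <stdint.h>

#define PAGE_FILE_FLAG	0x2u	/* arbitrary flag value in bits 0-1 */

/* pgoff_to_pte()/pte_to_pgoff() style packing: the offset lives in bits
 * 2-31, leaving the two low bits for flags, so 30 bits of offset fit. */
static uint32_t pack_file_pte(uint32_t off)
{
	return (off << 2) | PAGE_FILE_FLAG;
}

static uint32_t unpack_file_pte(uint32_t pte)
{
	return pte >> 2;
}

int main(void)
{
	uint32_t off = (1u << 30) - 1;	/* largest encodable offset */
	uint32_t pte = pack_file_pte(off);

	printf("off=%#lx pte=%#lx back=%#lx\n",
	       (unsigned long)off, (unsigned long)pte,
	       (unsigned long)unpack_file_pte(pte));
	return 0;
}
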
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index f7d4b0d285e8..4c1b5cc14c19 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -13,10 +13,13 @@
13#ifndef _ASM_PROCESSOR_H 13#ifndef _ASM_PROCESSOR_H
14#define _ASM_PROCESSOR_H 14#define _ASM_PROCESSOR_H
15 15
16#include <linux/threads.h>
17#include <linux/thread_info.h>
16#include <asm/page.h> 18#include <asm/page.h>
17#include <asm/ptrace.h> 19#include <asm/ptrace.h>
18#include <asm/cpu-regs.h> 20#include <asm/cpu-regs.h>
19#include <linux/threads.h> 21#include <asm/uaccess.h>
22#include <asm/current.h>
20 23
21/* Forward declaration, a strange C thing */ 24/* Forward declaration, a strange C thing */
22struct task_struct; 25struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
33 __pc; \ 36 __pc; \
34}) 37})
35 38
39extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
40
36extern void show_registers(struct pt_regs *regs); 41extern void show_registers(struct pt_regs *regs);
37 42
38/* 43/*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);
43 48
44struct mn10300_cpuinfo { 49struct mn10300_cpuinfo {
45 int type; 50 int type;
46 unsigned long loops_per_sec; 51 unsigned long loops_per_jiffy;
47 char hard_math; 52 char hard_math;
48 unsigned long *pgd_quick;
49 unsigned long *pte_quick;
50 unsigned long pgtable_cache_sz;
51}; 53};
52 54
53extern struct mn10300_cpuinfo boot_cpu_data; 55extern struct mn10300_cpuinfo boot_cpu_data;
54 56
57#ifdef CONFIG_SMP
58#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
59# error Sorry, NR_CPUS should be 2 to 8
60#endif
61extern struct mn10300_cpuinfo cpu_data[];
62#define current_cpu_data cpu_data[smp_processor_id()]
63#else /* CONFIG_SMP */
55#define cpu_data &boot_cpu_data 64#define cpu_data &boot_cpu_data
56#define current_cpu_data boot_cpu_data 65#define current_cpu_data boot_cpu_data
66#endif /* CONFIG_SMP */
57 67
58extern void identify_cpu(struct mn10300_cpuinfo *); 68extern void identify_cpu(struct mn10300_cpuinfo *);
59extern void print_cpu_info(struct mn10300_cpuinfo *); 69extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
76 */ 86 */
77#define TASK_UNMAPPED_BASE 0x30000000 87#define TASK_UNMAPPED_BASE 0x30000000
78 88
79typedef struct {
80 unsigned long seg;
81} mm_segment_t;
82
83struct fpu_state_struct { 89struct fpu_state_struct {
84 unsigned long fs[32]; /* fpu registers */ 90 unsigned long fs[32]; /* fpu registers */
85 unsigned long fpcr; /* fpu control register */ 91 unsigned long fpcr; /* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
92 unsigned long a3; /* kernel FP */ 98 unsigned long a3; /* kernel FP */
93 unsigned long wchan; 99 unsigned long wchan;
94 unsigned long usp; 100 unsigned long usp;
95 struct pt_regs *__frame;
96 unsigned long fpu_flags; 101 unsigned long fpu_flags;
97#define THREAD_USING_FPU 0x00000001 /* T if this task is using the FPU */ 102#define THREAD_USING_FPU 0x00000001 /* T if this task is using the FPU */
103#define THREAD_HAS_FPU 0x00000002 /* T if this task owns the FPU right now */
98 struct fpu_state_struct fpu_state; 104 struct fpu_state_struct fpu_state;
99}; 105};
100 106
101#define INIT_THREAD \ 107#define INIT_THREAD \
102{ \ 108{ \
103 .uregs = init_uregs, \ 109 .uregs = init_uregs, \
104 .pc = 0, \ 110 .pc = 0, \
105 .sp = 0, \ 111 .sp = 0, \
106 .a3 = 0, \ 112 .a3 = 0, \
107 .wchan = 0, \ 113 .wchan = 0, \
108 .__frame = NULL, \
109} 114}
110 115
111#define INIT_MMAP \ 116#define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
117 * - need to discard the frame stacked by the kernel thread invoking the execve 122 * - need to discard the frame stacked by the kernel thread invoking the execve
118 * syscall (see RESTORE_ALL macro) 123 * syscall (see RESTORE_ALL macro)
119 */ 124 */
120#define start_thread(regs, new_pc, new_sp) do { \ 125static inline void start_thread(struct pt_regs *regs,
121 set_fs(USER_DS); \ 126 unsigned long new_pc, unsigned long new_sp)
122 __frame = current->thread.uregs; \ 127{
123 __frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM; \ 128 struct thread_info *ti = current_thread_info();
124 __frame->pc = new_pc; \ 129 struct pt_regs *frame0;
125 __frame->sp = new_sp; \ 130 set_fs(USER_DS);
126} while (0) 131
132 frame0 = thread_info_to_uregs(ti);
133 frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
134 frame0->pc = new_pc;
135 frame0->sp = new_sp;
136 ti->frame = frame0;
137}
138
127 139
128/* Free all resources held by a thread. */ 140/* Free all resources held by a thread. */
129extern void release_thread(struct task_struct *); 141extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);
157 169
158static inline void prefetch(const void *x) 170static inline void prefetch(const void *x)
159{ 171{
160#ifndef CONFIG_MN10300_CACHE_DISABLED 172#ifdef CONFIG_MN10300_CACHE_ENABLED
161#ifdef CONFIG_MN10300_PROC_MN103E010 173#ifdef CONFIG_MN10300_PROC_MN103E010
162 asm volatile ("nop; nop; dcpf (%0)" : : "r"(x)); 174 asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
163#else 175#else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)
168 180
169static inline void prefetchw(const void *x) 181static inline void prefetchw(const void *x)
170{ 182{
171#ifndef CONFIG_MN10300_CACHE_DISABLED 183#ifdef CONFIG_MN10300_CACHE_ENABLED
172#ifdef CONFIG_MN10300_PROC_MN103E010 184#ifdef CONFIG_MN10300_PROC_MN103E010
173 asm volatile ("nop; nop; dcpf (%0)" : : "r"(x)); 185 asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
174#else 186#else
diff --git a/arch/mn10300/include/asm/ptrace.h b/arch/mn10300/include/asm/ptrace.h
index 7c2e911052b6..b6961811d445 100644
--- a/arch/mn10300/include/asm/ptrace.h
+++ b/arch/mn10300/include/asm/ptrace.h
@@ -40,7 +40,6 @@
40#define PT_PC 26 40#define PT_PC 26
41#define NR_PTREGS 27 41#define NR_PTREGS 27
42 42
43#ifndef __ASSEMBLY__
44/* 43/*
45 * This defines the way registers are stored in the event of an exception 44 * This defines the way registers are stored in the event of an exception
46 * - the strange order is due to the MOVM instruction 45 * - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
75 unsigned long epsw; 74 unsigned long epsw;
76 unsigned long pc; 75 unsigned long pc;
77}; 76};
78#endif
79 77
80/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ 78/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
81#define PTRACE_GETREGS 12 79#define PTRACE_GETREGS 12
@@ -86,12 +84,7 @@ struct pt_regs {
86/* options set using PTRACE_SETOPTIONS */ 84/* options set using PTRACE_SETOPTIONS */
87#define PTRACE_O_TRACESYSGOOD 0x00000001 85#define PTRACE_O_TRACESYSGOOD 0x00000001
88 86
89#if defined(__KERNEL__) 87#ifdef __KERNEL__
90
91extern struct pt_regs *__frame; /* current frame pointer */
92
93#if !defined(__ASSEMBLY__)
94struct task_struct;
95 88
96#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL) 89#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
97#define instruction_pointer(regs) ((regs)->pc) 90#define instruction_pointer(regs) ((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);
100 93
101#define arch_has_single_step() (1) 94#define arch_has_single_step() (1)
102 95
103#endif /* !__ASSEMBLY */
104
105#define profile_pc(regs) ((regs)->pc) 96#define profile_pc(regs) ((regs)->pc)
106 97
107#endif /* __KERNEL__ */ 98#endif /* __KERNEL__ */
108#endif /* _ASM_PTRACE_H */ 99#endif /* _ASM_PTRACE_H */
diff --git a/arch/mn10300/include/asm/reset-regs.h b/arch/mn10300/include/asm/reset-regs.h
index 174523d50132..10c7502a113f 100644
--- a/arch/mn10300/include/asm/reset-regs.h
+++ b/arch/mn10300/include/asm/reset-regs.h
@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
50 RSTCTR |= RSTCTR_CHIPRST; 50 RSTCTR |= RSTCTR_CHIPRST;
51} 51}
52 52
53extern unsigned int watchdog_alert_counter; 53extern unsigned int watchdog_alert_counter[];
54 54
55extern void watchdog_go(void); 55extern void watchdog_go(void);
56extern asmlinkage void watchdog_handler(void); 56extern asmlinkage void watchdog_handler(void);
diff --git a/arch/mn10300/include/asm/rtc.h b/arch/mn10300/include/asm/rtc.h
index c295194cc703..6c14bb1d0d9b 100644
--- a/arch/mn10300/include/asm/rtc.h
+++ b/arch/mn10300/include/asm/rtc.h
@@ -15,25 +15,14 @@
15 15
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18extern void check_rtc_time(void);
19extern void __init calibrate_clock(void); 18extern void __init calibrate_clock(void);
20extern unsigned long __init get_initial_rtc_time(void);
21 19
22#else /* !CONFIG_MN10300_RTC */ 20#else /* !CONFIG_MN10300_RTC */
23 21
24static inline void check_rtc_time(void)
25{
26}
27
28static inline void calibrate_clock(void) 22static inline void calibrate_clock(void)
29{ 23{
30} 24}
31 25
32static inline unsigned long get_initial_rtc_time(void)
33{
34 return 0;
35}
36
37#endif /* !CONFIG_MN10300_RTC */ 26#endif /* !CONFIG_MN10300_RTC */
38 27
39#include <asm-generic/rtc.h> 28#include <asm-generic/rtc.h>
diff --git a/arch/mn10300/include/asm/rwlock.h b/arch/mn10300/include/asm/rwlock.h
new file mode 100644
index 000000000000..6d594d4a0e10
--- /dev/null
+++ b/arch/mn10300/include/asm/rwlock.h
@@ -0,0 +1,125 @@
1/*
2 * Helpers used by both rw spinlocks and rw semaphores.
3 *
4 * Based in part on code from semaphore.h and
5 * spinlock.h Copyright 1996 Linus Torvalds.
6 *
7 * Copyright 1999 Red Hat, Inc.
8 *
9 * Written by Benjamin LaHaise.
10 *
11 * Modified by Matsushita Electric Industrial Co., Ltd.
12 * Modifications:
13 * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the Free
17 * Software Foundation; either version 2 of the License, or (at your option)
18 * any later version.
19 */
20#ifndef _ASM_RWLOCK_H
21#define _ASM_RWLOCK_H
22
23#define RW_LOCK_BIAS 0x01000000
24
25#ifndef CONFIG_SMP
26
27typedef struct { unsigned long a[100]; } __dummy_lock_t;
28#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
29
30#define RW_LOCK_BIAS_STR "0x01000000"
31
32#define __build_read_lock_ptr(rw, helper) \
33 do { \
34 asm volatile( \
35 " mov (%0),d3 \n" \
36 " sub 1,d3 \n" \
37 " mov d3,(%0) \n" \
38 " blt 1f \n" \
39 " bra 2f \n" \
40 "1: jmp 3f \n" \
41 "2: \n" \
42 " .section .text.lock,\"ax\" \n" \
43 "3: call "helper"[],0 \n" \
44 " jmp 2b \n" \
45 " .previous" \
46 : \
47 : "d" (rw) \
48 : "memory", "d3", "cc"); \
49 } while (0)
50
51#define __build_read_lock_const(rw, helper) \
52 do { \
53 asm volatile( \
54 " mov (%0),d3 \n" \
55 " sub 1,d3 \n" \
56 " mov d3,(%0) \n" \
57 " blt 1f \n" \
58 " bra 2f \n" \
59 "1: jmp 3f \n" \
60 "2: \n" \
61 " .section .text.lock,\"ax\" \n" \
62 "3: call "helper"[],0 \n" \
63 " jmp 2b \n" \
64 " .previous" \
65 : \
66 : "d" (rw) \
67 : "memory", "d3", "cc"); \
68 } while (0)
69
70#define __build_read_lock(rw, helper) \
71 do { \
72 if (__builtin_constant_p(rw)) \
73 __build_read_lock_const(rw, helper); \
74 else \
75 __build_read_lock_ptr(rw, helper); \
76 } while (0)
77
78#define __build_write_lock_ptr(rw, helper) \
79 do { \
80 asm volatile( \
81 " mov (%0),d3 \n" \
82 " sub 1,d3 \n" \
83 " mov d3,(%0) \n" \
84 " blt 1f \n" \
85 " bra 2f \n" \
86 "1: jmp 3f \n" \
87 "2: \n" \
88 " .section .text.lock,\"ax\" \n" \
89 "3: call "helper"[],0 \n" \
90 " jmp 2b \n" \
91 " .previous" \
92 : \
93 : "d" (rw) \
94 : "memory", "d3", "cc"); \
95 } while (0)
96
97#define __build_write_lock_const(rw, helper) \
98 do { \
99 asm volatile( \
100 " mov (%0),d3 \n" \
101 " sub 1,d3 \n" \
102 " mov d3,(%0) \n" \
103 " blt 1f \n" \
104 " bra 2f \n" \
105 "1: jmp 3f \n" \
106 "2: \n" \
107 " .section .text.lock,\"ax\" \n" \
108 "3: call "helper"[],0 \n" \
109 " jmp 2b \n" \
110 " .previous" \
111 : \
112 : "d" (rw) \
113 : "memory", "d3", "cc"); \
114 } while (0)
115
116#define __build_write_lock(rw, helper) \
117 do { \
118 if (__builtin_constant_p(rw)) \
119 __build_write_lock_const(rw, helper); \
120 else \
121 __build_write_lock_ptr(rw, helper); \
122 } while (0)
123
124#endif /* CONFIG_SMP */
125#endif /* _ASM_RWLOCK_H */
diff --git a/arch/mn10300/include/asm/serial-regs.h b/arch/mn10300/include/asm/serial-regs.h
index 6498469e93ac..8320cda32f5a 100644
--- a/arch/mn10300/include/asm/serial-regs.h
+++ b/arch/mn10300/include/asm/serial-regs.h
@@ -20,18 +20,25 @@
20/* serial port 0 */ 20/* serial port 0 */
21#define SC0CTR __SYSREG(0xd4002000, u16) /* control reg */ 21#define SC0CTR __SYSREG(0xd4002000, u16) /* control reg */
22#define SC01CTR_CK 0x0007 /* clock source select */ 22#define SC01CTR_CK 0x0007 /* clock source select */
23#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
24#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
25#define SC01CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */ 23#define SC01CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
26#define SC01CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */ 24#define SC01CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
25#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external closk */
26#define SC01CTR_CK_EXTERN 0x0007 /* - external closk */
27#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
28#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
27#define SC0CTR_CK_TM2UFLOW_2 0x0003 /* - 1/2 timer 2 underflow (serial port 0 only) */ 29#define SC0CTR_CK_TM2UFLOW_2 0x0003 /* - 1/2 timer 2 underflow (serial port 0 only) */
28#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */ 30#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
29#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 0 only) */
30#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 2 underflow (serial port 1 only) */
31#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */ 31#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
32#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
33#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
34#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 1 only) */
32#define SC1CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow (serial port 1 only) */ 35#define SC1CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow (serial port 1 only) */
33#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external closk */ 36#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
34#define SC01CTR_CK_EXTERN 0x0007 /* - external closk */ 37#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
38#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
39#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
40#define SC1CTR_CK_TM12UFLOW_8 0x0000 /* - 1/8 timer 12 underflow (serial port 1 only) */
41#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
35#define SC01CTR_STB 0x0008 /* stop bit select */ 42#define SC01CTR_STB 0x0008 /* stop bit select */
36#define SC01CTR_STB_1BIT 0x0000 /* - 1 stop bit */ 43#define SC01CTR_STB_1BIT 0x0000 /* - 1 stop bit */
37#define SC01CTR_STB_2BIT 0x0008 /* - 2 stop bits */ 44#define SC01CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -100,11 +107,23 @@
100 107
101/* serial port 2 */ 108/* serial port 2 */
102#define SC2CTR __SYSREG(0xd4002020, u16) /* control reg */ 109#define SC2CTR __SYSREG(0xd4002020, u16) /* control reg */
110#ifdef CONFIG_AM33_2
103#define SC2CTR_CK 0x0003 /* clock source select */ 111#define SC2CTR_CK 0x0003 /* clock source select */
104#define SC2CTR_CK_TM10UFLOW 0x0000 /* - timer 10 underflow */ 112#define SC2CTR_CK_TM10UFLOW 0x0000 /* - timer 10 underflow */
105#define SC2CTR_CK_TM2UFLOW 0x0001 /* - timer 2 underflow */ 113#define SC2CTR_CK_TM2UFLOW 0x0001 /* - timer 2 underflow */
106#define SC2CTR_CK_EXTERN 0x0002 /* - external closk */ 114#define SC2CTR_CK_EXTERN 0x0002 /* - external closk */
107#define SC2CTR_CK_TM3UFLOW 0x0003 /* - timer 3 underflow */ 115#define SC2CTR_CK_TM3UFLOW 0x0003 /* - timer 3 underflow */
116#else /* CONFIG_AM33_2 */
117#define SC2CTR_CK 0x0007 /* clock source select */
118#define SC2CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow */
119#define SC2CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
120#define SC2CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
121#define SC2CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow */
122#define SC2CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow */
123#define SC2CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow */
124#define SC2CTR_CK_EXTERN_8 0x0006 /* - 1/8 external closk */
125#define SC2CTR_CK_EXTERN 0x0007 /* - external closk */
126#endif /* CONFIG_AM33_2 */
108#define SC2CTR_STB 0x0008 /* stop bit select */ 127#define SC2CTR_STB 0x0008 /* stop bit select */
109#define SC2CTR_STB_1BIT 0x0000 /* - 1 stop bit */ 128#define SC2CTR_STB_1BIT 0x0000 /* - 1 stop bit */
110#define SC2CTR_STB_2BIT 0x0008 /* - 2 stop bits */ 129#define SC2CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -134,9 +153,14 @@
134#define SC2ICR_RES 0x04 /* receive error select */ 153#define SC2ICR_RES 0x04 /* receive error select */
135#define SC2ICR_RI 0x01 /* receive interrupt cause */ 154#define SC2ICR_RI 0x01 /* receive interrupt cause */
136 155
137#define SC2TXB __SYSREG(0xd4002018, u8) /* transmit buffer reg */ 156#define SC2TXB __SYSREG(0xd4002028, u8) /* transmit buffer reg */
138#define SC2RXB __SYSREG(0xd4002019, u8) /* receive buffer reg */ 157#define SC2RXB __SYSREG(0xd4002029, u8) /* receive buffer reg */
139#define SC2STR __SYSREG(0xd400201c, u8) /* status reg */ 158
159#ifdef CONFIG_AM33_2
160#define SC2STR __SYSREG(0xd400202c, u8) /* status reg */
161#else /* CONFIG_AM33_2 */
162#define SC2STR __SYSREG(0xd400202c, u16) /* status reg */
163#endif /* CONFIG_AM33_2 */
140#define SC2STR_OEF 0x0001 /* overrun error found */ 164#define SC2STR_OEF 0x0001 /* overrun error found */
141#define SC2STR_PEF 0x0002 /* parity error found */ 165#define SC2STR_PEF 0x0002 /* parity error found */
142#define SC2STR_FEF 0x0004 /* framing error found */ 166#define SC2STR_FEF 0x0004 /* framing error found */
@@ -146,10 +170,17 @@
146#define SC2STR_RXF 0x0040 /* receive status */ 170#define SC2STR_RXF 0x0040 /* receive status */
147#define SC2STR_TXF 0x0080 /* transmit status */ 171#define SC2STR_TXF 0x0080 /* transmit status */
148 172
173#ifdef CONFIG_AM33_2
149#define SC2TIM __SYSREG(0xd400202d, u8) /* status reg */ 174#define SC2TIM __SYSREG(0xd400202d, u8) /* status reg */
175#endif
150 176
177#ifdef CONFIG_AM33_2
151#define SC2RXIRQ 24 /* serial 2 Receive IRQ */ 178#define SC2RXIRQ 24 /* serial 2 Receive IRQ */
152#define SC2TXIRQ 25 /* serial 2 Transmit IRQ */ 179#define SC2TXIRQ 25 /* serial 2 Transmit IRQ */
180#else /* CONFIG_AM33_2 */
181#define SC2RXIRQ 68 /* serial 2 Receive IRQ */
182#define SC2TXIRQ 69 /* serial 2 Transmit IRQ */
183#endif /* CONFIG_AM33_2 */
153 184
154#define SC2RXICR GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */ 185#define SC2RXICR GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
155#define SC2TXICR GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */ 186#define SC2TXICR GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
diff --git a/arch/mn10300/include/asm/serial.h b/arch/mn10300/include/asm/serial.h
index a29445cddd6f..23a799293599 100644
--- a/arch/mn10300/include/asm/serial.h
+++ b/arch/mn10300/include/asm/serial.h
@@ -9,10 +9,8 @@
9 * 2 of the Licence, or (at your option) any later version. 9 * 2 of the Licence, or (at your option) any later version.
10 */ 10 */
11 11
12/* 12#ifndef _ASM_SERIAL_H
13 * The ASB2305 has an 18.432 MHz clock the UART 13#define _ASM_SERIAL_H
14 */
15#define BASE_BAUD (18432000 / 16)
16 14
17/* Standard COM flags (except for COM4, because of the 8514 problem) */ 15/* Standard COM flags (except for COM4, because of the 8514 problem) */
18#ifdef CONFIG_SERIAL_DETECT_IRQ 16#ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
34#endif 32#endif
35 33
36#include <unit/serial.h> 34#include <unit/serial.h>
35
36#endif /* _ASM_SERIAL_H */
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h
index 4eb8c61b7dab..a3930e43a958 100644
--- a/arch/mn10300/include/asm/smp.h
+++ b/arch/mn10300/include/asm/smp.h
@@ -3,6 +3,16 @@
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * Modified by Matsushita Electric Industrial Co., Ltd.
7 * Modifications:
8 * 13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
9 * for SMP support.
10 * 22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
11 * 23-Feb-2007 MEI Add the define related to SMP icahce invalidate.
12 * 23-Jun-2008 MEI Delete INTC_IPI.
13 * 22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
14 * 04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
15 *
6 * This program is free software; you can redistribute it and/or 16 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence 17 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version 18 * as published by the Free Software Foundation; either version
@@ -11,8 +21,85 @@
11#ifndef _ASM_SMP_H 21#ifndef _ASM_SMP_H
12#define _ASM_SMP_H 22#define _ASM_SMP_H
13 23
14#ifdef CONFIG_SMP 24#ifndef __ASSEMBLY__
15#error SMP not yet supported for MN10300 25#include <linux/threads.h>
26#include <linux/cpumask.h>
16#endif 27#endif
17 28
29#ifdef CONFIG_SMP
30#include <proc/smp-regs.h>
31
32#define RESCHEDULE_IPI 63
33#define CALL_FUNC_SINGLE_IPI 192
34#define LOCAL_TIMER_IPI 193
35#define FLUSH_CACHE_IPI 194
36#define CALL_FUNCTION_NMI_IPI 195
37#define GDB_NMI_IPI 196
38
39#define SMP_BOOT_IRQ 195
40
41#define RESCHEDULE_GxICR_LV GxICR_LEVEL_6
42#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
43#define LOCAL_TIMER_GxICR_LV GxICR_LEVEL_4
44#define FLUSH_CACHE_GxICR_LV GxICR_LEVEL_0
45#define SMP_BOOT_GxICR_LV GxICR_LEVEL_0
46
47#define TIME_OUT_COUNT_BOOT_IPI 100
48#define DELAY_TIME_BOOT_IPI 75000
49
50
51#ifndef __ASSEMBLY__
52
53/**
54 * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
55 *
56 * What we really want to do is to use the CPUID hardware CPU register to get
57 * this information, but accesses to that aren't cached, and run at system bus
58 * speed, not CPU speed. A copy of this value is, however, stored in the
59 * thread_info struct, and that can be cached.
60 *
61 * An alternate way of dealing with this could be to use the EPSW.S bits to
62 * cache this information for systems with up to four CPUs.
63 */
64#if 0
65#define raw_smp_processor_id() (CPUID)
66#else
67#define raw_smp_processor_id() (current_thread_info()->cpu)
18#endif 68#endif
69
70static inline int cpu_logical_map(int cpu)
71{
72 return cpu;
73}
74
75static inline int cpu_number_map(int cpu)
76{
77 return cpu;
78}
79
80
81extern cpumask_t cpu_boot_map;
82
83extern void smp_init_cpus(void);
84extern void smp_cache_interrupt(void);
85extern void send_IPI_allbutself(int irq);
86extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
87
88extern void arch_send_call_function_single_ipi(int cpu);
89extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
90
91#ifdef CONFIG_HOTPLUG_CPU
92extern int __cpu_disable(void);
93extern void __cpu_die(unsigned int cpu);
94#endif /* CONFIG_HOTPLUG_CPU */
95
96#endif /* __ASSEMBLY__ */
97#else /* CONFIG_SMP */
98#ifndef __ASSEMBLY__
99
100static inline void smp_init_cpus(void) {}
101
102#endif /* __ASSEMBLY__ */
103#endif /* CONFIG_SMP */
104
105#endif /* _ASM_SMP_H */
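
The raw_smp_processor_id() comment above explains why the cached copy in thread_info is read instead of the uncached CPUID register. The toy below restates that pattern in plain C: read the slow identifier once, then serve later queries from the cached field. All names here are made up for the illustration.

#include <stdio.h>

/* Toy restatement: the hardware CPU ID register is slow and uncached,
 * so a copy is kept in per-thread data and read from there instead. */
struct toy_thread_info {
	int cpu;			/* cached copy, written once */
};

static int slow_read_hw_cpuid(void)	/* stand-in for the CPUID register */
{
	return 1;
}

static int toy_raw_smp_processor_id(const struct toy_thread_info *ti)
{
	return ti->cpu;			/* cheap, cacheable read */
}

int main(void)
{
	struct toy_thread_info ti = { .cpu = slow_read_hw_cpuid() };

	printf("running on cpu %d\n", toy_raw_smp_processor_id(&ti));
	return 0;
}
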
diff --git a/arch/mn10300/include/asm/smsc911x.h b/arch/mn10300/include/asm/smsc911x.h
new file mode 100644
index 000000000000..2fcd1080322b
--- /dev/null
+++ b/arch/mn10300/include/asm/smsc911x.h
@@ -0,0 +1 @@
#include <unit/smsc911x.h>
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index 4bf9c8b169e0..93429154e898 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -11,6 +11,183 @@
11#ifndef _ASM_SPINLOCK_H 11#ifndef _ASM_SPINLOCK_H
12#define _ASM_SPINLOCK_H 12#define _ASM_SPINLOCK_H
13 13
14#error SMP spinlocks not implemented for MN10300 14#include <asm/atomic.h>
15#include <asm/rwlock.h>
16#include <asm/page.h>
15 17
18/*
19 * Simple spin lock operations. There are two variants, one clears IRQ's
20 * on the local processor, one does not.
21 *
22 * We make no fairness assumptions. They have a cost.
23 */
24
25#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
26#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
27
28static inline void arch_spin_unlock(arch_spinlock_t *lock)
29{
30 asm volatile(
31 " bclr 1,(0,%0) \n"
32 :
33 : "a"(&lock->slock)
34 : "memory", "cc");
35}
36
37static inline int arch_spin_trylock(arch_spinlock_t *lock)
38{
39 int ret;
40
41 asm volatile(
42 " mov 1,%0 \n"
43 " bset %0,(%1) \n"
44 " bne 1f \n"
45 " clr %0 \n"
46 "1: xor 1,%0 \n"
47 : "=d"(ret)
48 : "a"(&lock->slock)
49 : "memory", "cc");
50
51 return ret;
52}
53
54static inline void arch_spin_lock(arch_spinlock_t *lock)
55{
56 asm volatile(
57 "1: bset 1,(0,%0) \n"
58 " bne 1b \n"
59 :
60 : "a"(&lock->slock)
61 : "memory", "cc");
62}
63
64static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
65 unsigned long flags)
66{
67 int temp;
68
69 asm volatile(
70 "1: bset 1,(0,%2) \n"
71 " beq 3f \n"
72 " mov %1,epsw \n"
73 "2: mov (0,%2),%0 \n"
74 " or %0,%0 \n"
75 " bne 2b \n"
76 " mov %3,%0 \n"
77 " mov %0,epsw \n"
78 " nop \n"
79 " nop \n"
80 " bra 1b\n"
81 "3: \n"
82 : "=&d" (temp)
83 : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
84 : "memory", "cc");
85}
86
87#ifdef __KERNEL__
88
89/*
90 * Read-write spinlocks, allowing multiple readers
91 * but only one writer.
92 *
93 * NOTE! it is quite common to have readers in interrupts
94 * but no interrupt writers. For those circumstances we
95 * can "mix" irq-safe locks - any writer needs to get a
96 * irq-safe write-lock, but readers can get non-irqsafe
97 * read-locks.
98 */
99
100/**
101 * read_can_lock - would read_trylock() succeed?
102 * @lock: the rwlock in question.
103 */
104#define arch_read_can_lock(x) ((int)(x)->lock > 0)
105
106/**
107 * write_can_lock - would write_trylock() succeed?
108 * @lock: the rwlock in question.
109 */
110#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
111
112/*
113 * On mn10300, we implement read-write locks as a 32-bit counter
114 * with the high bit (sign) being the "contended" bit.
115 */
116static inline void arch_read_lock(arch_rwlock_t *rw)
117{
118#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
119 __build_read_lock(rw, "__read_lock_failed");
120#else
121 {
122 atomic_t *count = (atomic_t *)rw;
123 while (atomic_dec_return(count) < 0)
124 atomic_inc(count);
125 }
126#endif
127}
128
129static inline void arch_write_lock(arch_rwlock_t *rw)
130{
131#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
132 __build_write_lock(rw, "__write_lock_failed");
133#else
134 {
135 atomic_t *count = (atomic_t *)rw;
136 while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
137 atomic_add(RW_LOCK_BIAS, count);
138 }
139#endif
140}
141
142static inline void arch_read_unlock(arch_rwlock_t *rw)
143{
144#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
145 __build_read_unlock(rw);
146#else
147 {
148 atomic_t *count = (atomic_t *)rw;
149 atomic_inc(count);
150 }
151#endif
152}
153
154static inline void arch_write_unlock(arch_rwlock_t *rw)
155{
156#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
157 __build_write_unlock(rw);
158#else
159 {
160 atomic_t *count = (atomic_t *)rw;
161 atomic_add(RW_LOCK_BIAS, count);
162 }
163#endif
164}
165
166static inline int arch_read_trylock(arch_rwlock_t *lock)
167{
168 atomic_t *count = (atomic_t *)lock;
169 atomic_dec(count);
170 if (atomic_read(count) >= 0)
171 return 1;
172 atomic_inc(count);
173 return 0;
174}
175
176static inline int arch_write_trylock(arch_rwlock_t *lock)
177{
178 atomic_t *count = (atomic_t *)lock;
179 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
180 return 1;
181 atomic_add(RW_LOCK_BIAS, count);
182 return 0;
183}
184
185#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
186#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
187
188#define _raw_spin_relax(lock) cpu_relax()
189#define _raw_read_relax(lock) cpu_relax()
190#define _raw_write_relax(lock) cpu_relax()
191
192#endif /* __KERNEL__ */
16#endif /* _ASM_SPINLOCK_H */ 193#endif /* _ASM_SPINLOCK_H */
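
The fallback rwlock implementation above treats the lock word as a counter biased by RW_LOCK_BIAS: each reader takes 1, a writer takes the whole bias, and a writer only wins if the counter drops exactly to zero. Below is a single-threaded model of that arithmetic, using plain ints instead of atomic_t, so it shows the bookkeeping but not the concurrency.

#include <stdio.h>
#include <stdbool.h>

#define RW_LOCK_BIAS	0x01000000

static int lock = RW_LOCK_BIAS;		/* plain int: no concurrency here */

static bool read_trylock(void)
{
	if (--lock >= 0)		/* a reader takes 1 */
		return true;
	++lock;				/* negative: a writer holds the lock */
	return false;
}

static void read_unlock(void)
{
	++lock;
}

static bool write_trylock(void)
{
	if ((lock -= RW_LOCK_BIAS) == 0)	/* a writer takes the whole bias */
		return true;
	lock += RW_LOCK_BIAS;		/* readers or another writer present */
	return false;
}

static void write_unlock(void)
{
	lock += RW_LOCK_BIAS;
}

int main(void)
{
	printf("read  : %d\n", read_trylock());		/* 1 */
	printf("write : %d\n", write_trylock());	/* 0 - a reader is in */
	read_unlock();
	printf("write : %d\n", write_trylock());	/* 1 */
	printf("read  : %d\n", read_trylock());		/* 0 - writer holds it */
	write_unlock();
	return 0;
}
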
diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..653dc519b405
--- /dev/null
+++ b/arch/mn10300/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
1#ifndef _ASM_SPINLOCK_TYPES_H
2#define _ASM_SPINLOCK_TYPES_H
3
4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly"
6#endif
7
8typedef struct arch_spinlock {
9 unsigned int slock;
10} arch_spinlock_t;
11
12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13
14typedef struct {
15 unsigned int lock;
16} arch_rwlock_t;
17
18#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19
20#endif /* _ASM_SPINLOCK_TYPES_H */
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
index 9f7c7e17c01e..8ff3e5aaca41 100644
--- a/arch/mn10300/include/asm/system.h
+++ b/arch/mn10300/include/asm/system.h
@@ -12,12 +12,29 @@
12#define _ASM_SYSTEM_H 12#define _ASM_SYSTEM_H
13 13
14#include <asm/cpu-regs.h> 14#include <asm/cpu-regs.h>
15#include <asm/intctl-regs.h>
15 16
16#ifdef __KERNEL__ 17#ifdef __KERNEL__
17#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
18 19
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/irqflags.h> 21#include <linux/irqflags.h>
22#include <asm/atomic.h>
23
24#if !defined(CONFIG_LAZY_SAVE_FPU)
25struct fpu_state_struct;
26extern asmlinkage void fpu_save(struct fpu_state_struct *);
27#define switch_fpu(prev, next) \
28 do { \
29 if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \
30 (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \
31 (prev)->thread.uregs->epsw &= ~EPSW_FE; \
32 fpu_save(&(prev)->thread.fpu_state); \
33 } \
34 } while (0)
35#else
36#define switch_fpu(prev, next) do {} while (0)
37#endif
21 38
22struct task_struct; 39struct task_struct;
23struct thread_struct; 40struct thread_struct;
@@ -30,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
30/* context switching is now performed out-of-line in switch_to.S */ 47/* context switching is now performed out-of-line in switch_to.S */
31#define switch_to(prev, next, last) \ 48#define switch_to(prev, next, last) \
32do { \ 49do { \
50 switch_fpu(prev, next); \
33 current->thread.wchan = (u_long) __builtin_return_address(0); \ 51 current->thread.wchan = (u_long) __builtin_return_address(0); \
34 (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \ 52 (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
35 mb(); \ 53 mb(); \
@@ -40,8 +58,6 @@ do { \
40 58
41#define nop() asm volatile ("nop") 59#define nop() asm volatile ("nop")
42 60
43#endif /* !__ASSEMBLY__ */
44
45/* 61/*
46 * Force strict CPU ordering. 62 * Force strict CPU ordering.
47 * And yes, this is required on UP too when we're talking 63 * And yes, this is required on UP too when we're talking
@@ -68,64 +84,19 @@ do { \
68#define smp_mb() mb() 84#define smp_mb() mb()
69#define smp_rmb() rmb() 85#define smp_rmb() rmb()
70#define smp_wmb() wmb() 86#define smp_wmb() wmb()
71#else 87#define set_mb(var, value) do { xchg(&var, value); } while (0)
88#else /* CONFIG_SMP */
72#define smp_mb() barrier() 89#define smp_mb() barrier()
73#define smp_rmb() barrier() 90#define smp_rmb() barrier()
74#define smp_wmb() barrier() 91#define smp_wmb() barrier()
75#endif
76
77#define set_mb(var, value) do { var = value; mb(); } while (0) 92#define set_mb(var, value) do { var = value; mb(); } while (0)
93#endif /* CONFIG_SMP */
94
78#define set_wmb(var, value) do { var = value; wmb(); } while (0) 95#define set_wmb(var, value) do { var = value; wmb(); } while (0)
79 96
80#define read_barrier_depends() do {} while (0) 97#define read_barrier_depends() do {} while (0)
81#define smp_read_barrier_depends() do {} while (0) 98#define smp_read_barrier_depends() do {} while (0)
82 99
83/*****************************************************************************/
84/*
85 * MN10300 doesn't actually have an exchange instruction
86 */
87#ifndef __ASSEMBLY__
88
89struct __xchg_dummy { unsigned long a[100]; };
90#define __xg(x) ((struct __xchg_dummy *)(x))
91
92static inline
93unsigned long __xchg(volatile unsigned long *m, unsigned long val)
94{
95 unsigned long retval;
96 unsigned long flags;
97
98 local_irq_save(flags);
99 retval = *m;
100 *m = val;
101 local_irq_restore(flags);
102 return retval;
103}
104
105#define xchg(ptr, v) \
106 ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
107 (unsigned long)(v)))
108
109static inline unsigned long __cmpxchg(volatile unsigned long *m,
110 unsigned long old, unsigned long new)
111{
112 unsigned long retval;
113 unsigned long flags;
114
115 local_irq_save(flags);
116 retval = *m;
117 if (retval == old)
118 *m = new;
119 local_irq_restore(flags);
120 return retval;
121}
122
123#define cmpxchg(ptr, o, n) \
124 ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
125 (unsigned long)(o), \
126 (unsigned long)(n)))
127
128#endif /* !__ASSEMBLY__ */ 100#endif /* !__ASSEMBLY__ */
129
130#endif /* __KERNEL__ */ 101#endif /* __KERNEL__ */
131#endif /* _ASM_SYSTEM_H */ 102#endif /* _ASM_SYSTEM_H */
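
When CONFIG_LAZY_SAVE_FPU is off, switch_fpu() above saves the outgoing task's FPU state eagerly at every context switch instead of waiting for an FPU exception. The sketch below is a toy restatement of that hand-off with made-up task and state types; it only shows the ownership logic, not the real register dump.

#include <stdio.h>
#include <stdbool.h>

/* Made-up types: just enough to show the eager save-and-drop. */
struct toy_task {
	const char *name;
	bool has_fpu;			/* analogue of THREAD_HAS_FPU */
	int saved_state;		/* analogue of struct fpu_state_struct */
};

static void fpu_save(struct toy_task *t)
{
	t->saved_state = 42;		/* pretend to dump the FPU registers */
	printf("saved FPU state of %s\n", t->name);
}

/* Eager (non-lazy) switch: if the outgoing task owns the FPU, save its
 * state now and drop ownership, rather than waiting for an FPU trap. */
static void toy_switch_fpu(struct toy_task *prev)
{
	if (prev->has_fpu) {
		prev->has_fpu = false;
		fpu_save(prev);
	}
}

int main(void)
{
	struct toy_task a = { "taskA", true, 0 };

	toy_switch_fpu(&a);		/* saves once */
	toy_switch_fpu(&a);		/* no-op: ownership already dropped */
	return 0;
}
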
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 2001cb657a95..aa07a4a5d794 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -16,10 +16,6 @@
16 16
17#include <asm/page.h> 17#include <asm/page.h>
18 18
19#ifndef __ASSEMBLY__
20#include <asm/processor.h>
21#endif
22
23#define PREEMPT_ACTIVE 0x10000000 19#define PREEMPT_ACTIVE 0x10000000
24 20
25#ifdef CONFIG_4KSTACKS 21#ifdef CONFIG_4KSTACKS
@@ -38,10 +34,14 @@
38 * must also be changed 34 * must also be changed
39 */ 35 */
40#ifndef __ASSEMBLY__ 36#ifndef __ASSEMBLY__
37typedef struct {
38 unsigned long seg;
39} mm_segment_t;
41 40
42struct thread_info { 41struct thread_info {
43 struct task_struct *task; /* main task structure */ 42 struct task_struct *task; /* main task structure */
44 struct exec_domain *exec_domain; /* execution domain */ 43 struct exec_domain *exec_domain; /* execution domain */
44 struct pt_regs *frame; /* current exception frame */
45 unsigned long flags; /* low level flags */ 45 unsigned long flags; /* low level flags */
46 __u32 cpu; /* current CPU */ 46 __u32 cpu; /* current CPU */
47 __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ 47 __s32 preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
55 __u8 supervisor_stack[0]; 55 __u8 supervisor_stack[0];
56}; 56};
57 57
58#define thread_info_to_uregs(ti) \
59 ((struct pt_regs *) \
60 ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
61
58#else /* !__ASSEMBLY__ */ 62#else /* !__ASSEMBLY__ */
59 63
60#ifndef __ASM_OFFSETS_H__ 64#ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
102 return ti; 106 return ti;
103} 107}
104 108
109static inline __attribute__((const))
110struct pt_regs *current_frame(void)
111{
112 return current_thread_info()->frame;
113}
114
105/* how to get the current stack pointer from C */ 115/* how to get the current stack pointer from C */
106static inline unsigned long current_stack_pointer(void) 116static inline unsigned long current_stack_pointer(void)
107{ 117{
diff --git a/arch/mn10300/include/asm/timer-regs.h b/arch/mn10300/include/asm/timer-regs.h
index 1d883b7f94ab..c634977caf66 100644
--- a/arch/mn10300/include/asm/timer-regs.h
+++ b/arch/mn10300/include/asm/timer-regs.h
@@ -17,21 +17,27 @@
17 17
18#ifdef __KERNEL__ 18#ifdef __KERNEL__
19 19
20/* timer prescalar control */ 20/*
21 * Timer prescalar control
22 */
21#define TMPSCNT __SYSREG(0xd4003071, u8) /* timer prescaler control */ 23#define TMPSCNT __SYSREG(0xd4003071, u8) /* timer prescaler control */
22#define TMPSCNT_ENABLE 0x80 /* timer prescaler enable */ 24#define TMPSCNT_ENABLE 0x80 /* timer prescaler enable */
23#define TMPSCNT_DISABLE 0x00 /* timer prescaler disable */ 25#define TMPSCNT_DISABLE 0x00 /* timer prescaler disable */
24 26
25/* 8 bit timers */ 27/*
28 * 8-bit timers
29 */
26#define TM0MD __SYSREG(0xd4003000, u8) /* timer 0 mode register */ 30#define TM0MD __SYSREG(0xd4003000, u8) /* timer 0 mode register */
27#define TM0MD_SRC 0x07 /* timer source */ 31#define TM0MD_SRC 0x07 /* timer source */
28#define TM0MD_SRC_IOCLK 0x00 /* - IOCLK */ 32#define TM0MD_SRC_IOCLK 0x00 /* - IOCLK */
29#define TM0MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */ 33#define TM0MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
30#define TM0MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */ 34#define TM0MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
31#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
32#define TM0MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 35#define TM0MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
33#define TM0MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 36#define TM0MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
37#if defined(CONFIG_AM33_2)
38#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
34#define TM0MD_SRC_TM0IO 0x07 /* - TM0IO pin input */ 39#define TM0MD_SRC_TM0IO 0x07 /* - TM0IO pin input */
40#endif /* CONFIG_AM33_2 */
35#define TM0MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 41#define TM0MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
36#define TM0MD_COUNT_ENABLE 0x80 /* timer count enable */ 42#define TM0MD_COUNT_ENABLE 0x80 /* timer count enable */
37 43
@@ -43,7 +49,9 @@
43#define TM1MD_SRC_TM0CASCADE 0x03 /* - cascade with timer 0 */ 49#define TM1MD_SRC_TM0CASCADE 0x03 /* - cascade with timer 0 */
44#define TM1MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 50#define TM1MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
45#define TM1MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 51#define TM1MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
52#if defined(CONFIG_AM33_2)
46#define TM1MD_SRC_TM1IO 0x07 /* - TM1IO pin input */ 53#define TM1MD_SRC_TM1IO 0x07 /* - TM1IO pin input */
54#endif /* CONFIG_AM33_2 */
47#define TM1MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 55#define TM1MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
48#define TM1MD_COUNT_ENABLE 0x80 /* timer count enable */ 56#define TM1MD_COUNT_ENABLE 0x80 /* timer count enable */
49 57
@@ -55,7 +63,9 @@
55#define TM2MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 1 */ 63#define TM2MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 1 */
56#define TM2MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 64#define TM2MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
57#define TM2MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 65#define TM2MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
66#if defined(CONFIG_AM33_2)
58#define TM2MD_SRC_TM2IO 0x07 /* - TM2IO pin input */ 67#define TM2MD_SRC_TM2IO 0x07 /* - TM2IO pin input */
68#endif /* CONFIG_AM33_2 */
59#define TM2MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 69#define TM2MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
60#define TM2MD_COUNT_ENABLE 0x80 /* timer count enable */ 70#define TM2MD_COUNT_ENABLE 0x80 /* timer count enable */
61 71
@@ -64,11 +74,13 @@
64#define TM3MD_SRC_IOCLK 0x00 /* - IOCLK */ 74#define TM3MD_SRC_IOCLK 0x00 /* - IOCLK */
65#define TM3MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */ 75#define TM3MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
66#define TM3MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */ 76#define TM3MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
67#define TM3MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 2 */ 77#define TM3MD_SRC_TM2CASCADE 0x03 /* - cascade with timer 2 */
68#define TM3MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 78#define TM3MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
69#define TM3MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 79#define TM3MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
70#define TM3MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 80#define TM3MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
81#if defined(CONFIG_AM33_2)
71#define TM3MD_SRC_TM3IO 0x07 /* - TM3IO pin input */ 82#define TM3MD_SRC_TM3IO 0x07 /* - TM3IO pin input */
83#endif /* CONFIG_AM33_2 */
72#define TM3MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 84#define TM3MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
73#define TM3MD_COUNT_ENABLE 0x80 /* timer count enable */ 85#define TM3MD_COUNT_ENABLE 0x80 /* timer count enable */
74 86
@@ -96,7 +108,9 @@
96#define TM2ICR GxICR(TM2IRQ) /* timer 2 uflow intr ctrl reg */ 108#define TM2ICR GxICR(TM2IRQ) /* timer 2 uflow intr ctrl reg */
97#define TM3ICR GxICR(TM3IRQ) /* timer 3 uflow intr ctrl reg */ 109#define TM3ICR GxICR(TM3IRQ) /* timer 3 uflow intr ctrl reg */
98 110
99/* 16-bit timers 4,5 & 7-11 */ 111/*
112 * 16-bit timers 4,5 & 7-15
113 */
100#define TM4MD __SYSREG(0xd4003080, u8) /* timer 4 mode register */ 114#define TM4MD __SYSREG(0xd4003080, u8) /* timer 4 mode register */
101#define TM4MD_SRC 0x07 /* timer source */ 115#define TM4MD_SRC 0x07 /* timer source */
102#define TM4MD_SRC_IOCLK 0x00 /* - IOCLK */ 116#define TM4MD_SRC_IOCLK 0x00 /* - IOCLK */
@@ -105,7 +119,9 @@
105#define TM4MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 119#define TM4MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
106#define TM4MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 120#define TM4MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
107#define TM4MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 121#define TM4MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
122#if defined(CONFIG_AM33_2)
108#define TM4MD_SRC_TM4IO 0x07 /* - TM4IO pin input */ 123#define TM4MD_SRC_TM4IO 0x07 /* - TM4IO pin input */
124#endif /* CONFIG_AM33_2 */
109#define TM4MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 125#define TM4MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
110#define TM4MD_COUNT_ENABLE 0x80 /* timer count enable */ 126#define TM4MD_COUNT_ENABLE 0x80 /* timer count enable */
111 127
@@ -118,7 +134,11 @@
118#define TM5MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 134#define TM5MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
119#define TM5MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 135#define TM5MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
120#define TM5MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 136#define TM5MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
137#if defined(CONFIG_AM33_2)
121#define TM5MD_SRC_TM5IO 0x07 /* - TM5IO pin input */ 138#define TM5MD_SRC_TM5IO 0x07 /* - TM5IO pin input */
139#else /* !CONFIG_AM33_2 */
140#define TM5MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
141#endif /* CONFIG_AM33_2 */
122#define TM5MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 142#define TM5MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
123#define TM5MD_COUNT_ENABLE 0x80 /* timer count enable */ 143#define TM5MD_COUNT_ENABLE 0x80 /* timer count enable */
124 144
@@ -130,7 +150,9 @@
130#define TM7MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 150#define TM7MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
131#define TM7MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 151#define TM7MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
132#define TM7MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 152#define TM7MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
153#if defined(CONFIG_AM33_2)
133#define TM7MD_SRC_TM7IO 0x07 /* - TM7IO pin input */ 154#define TM7MD_SRC_TM7IO 0x07 /* - TM7IO pin input */
155#endif /* CONFIG_AM33_2 */
134#define TM7MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 156#define TM7MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
135#define TM7MD_COUNT_ENABLE 0x80 /* timer count enable */ 157#define TM7MD_COUNT_ENABLE 0x80 /* timer count enable */
136 158
@@ -143,7 +165,11 @@
143#define TM8MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 165#define TM8MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
144#define TM8MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 166#define TM8MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
145#define TM8MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 167#define TM8MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
168#if defined(CONFIG_AM33_2)
146#define TM8MD_SRC_TM8IO 0x07 /* - TM8IO pin input */ 169#define TM8MD_SRC_TM8IO 0x07 /* - TM8IO pin input */
170#else /* !CONFIG_AM33_2 */
171#define TM8MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
172#endif /* CONFIG_AM33_2 */
147#define TM8MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 173#define TM8MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
148#define TM8MD_COUNT_ENABLE 0x80 /* timer count enable */ 174#define TM8MD_COUNT_ENABLE 0x80 /* timer count enable */
149 175
@@ -156,7 +182,11 @@
156#define TM9MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 182#define TM9MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
157#define TM9MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 183#define TM9MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
158#define TM9MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 184#define TM9MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
185#if defined(CONFIG_AM33_2)
159#define TM9MD_SRC_TM9IO 0x07 /* - TM9IO pin input */ 186#define TM9MD_SRC_TM9IO 0x07 /* - TM9IO pin input */
187#else /* !CONFIG_AM33_2 */
188#define TM9MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
189#endif /* CONFIG_AM33_2 */
160#define TM9MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 190#define TM9MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
161#define TM9MD_COUNT_ENABLE 0x80 /* timer count enable */ 191#define TM9MD_COUNT_ENABLE 0x80 /* timer count enable */
162 192
@@ -169,7 +199,11 @@
169#define TM10MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 199#define TM10MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
170#define TM10MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 200#define TM10MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
171#define TM10MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 201#define TM10MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
202#if defined(CONFIG_AM33_2)
172#define TM10MD_SRC_TM10IO 0x07 /* - TM10IO pin input */ 203#define TM10MD_SRC_TM10IO 0x07 /* - TM10IO pin input */
204#else /* !CONFIG_AM33_2 */
205#define TM10MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
206#endif /* CONFIG_AM33_2 */
173#define TM10MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 207#define TM10MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
174#define TM10MD_COUNT_ENABLE 0x80 /* timer count enable */ 208#define TM10MD_COUNT_ENABLE 0x80 /* timer count enable */
175 209
@@ -178,32 +212,101 @@
178#define TM11MD_SRC_IOCLK 0x00 /* - IOCLK */ 212#define TM11MD_SRC_IOCLK 0x00 /* - IOCLK */
179#define TM11MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */ 213#define TM11MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
180#define TM11MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */ 214#define TM11MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
181#define TM11MD_SRC_TM7CASCADE 0x03 /* - cascade with timer 7 */
182#define TM11MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */ 215#define TM11MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
183#define TM11MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */ 216#define TM11MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
184#define TM11MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */ 217#define TM11MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
218#if defined(CONFIG_AM33_2)
185#define TM11MD_SRC_TM11IO 0x07 /* - TM11IO pin input */ 219#define TM11MD_SRC_TM11IO 0x07 /* - TM11IO pin input */
220#else /* !CONFIG_AM33_2 */
221#define TM11MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
222#endif /* CONFIG_AM33_2 */
186#define TM11MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */ 223#define TM11MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
187#define TM11MD_COUNT_ENABLE 0x80 /* timer count enable */ 224#define TM11MD_COUNT_ENABLE 0x80 /* timer count enable */
188 225
226#if defined(CONFIG_AM34_2)
 227#define TM12MD __SYSREG(0xd4003180, u8) /* timer 12 mode register */
228#define TM12MD_SRC 0x07 /* timer source */
229#define TM12MD_SRC_IOCLK 0x00 /* - IOCLK */
230#define TM12MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
231#define TM12MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
232#define TM12MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
233#define TM12MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
234#define TM12MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
235#define TM12MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
236#define TM12MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
237#define TM12MD_COUNT_ENABLE 0x80 /* timer count enable */
238
 239#define TM13MD __SYSREG(0xd4003182, u8) /* timer 13 mode register */
240#define TM13MD_SRC 0x07 /* timer source */
241#define TM13MD_SRC_IOCLK 0x00 /* - IOCLK */
242#define TM13MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
243#define TM13MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
244#define TM13MD_SRC_TM12CASCADE 0x03 /* - cascade with timer 12 */
245#define TM13MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
246#define TM13MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
247#define TM13MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
248#define TM13MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
249#define TM13MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
250#define TM13MD_COUNT_ENABLE 0x80 /* timer count enable */
251
 252#define TM14MD __SYSREG(0xd4003184, u8) /* timer 14 mode register */
253#define TM14MD_SRC 0x07 /* timer source */
254#define TM14MD_SRC_IOCLK 0x00 /* - IOCLK */
255#define TM14MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
256#define TM14MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
257#define TM14MD_SRC_TM13CASCADE 0x03 /* - cascade with timer 13 */
258#define TM14MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
259#define TM14MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
260#define TM14MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
261#define TM14MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
262#define TM14MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
263#define TM14MD_COUNT_ENABLE 0x80 /* timer count enable */
264
 265#define TM15MD __SYSREG(0xd4003186, u8) /* timer 15 mode register */
266#define TM15MD_SRC 0x07 /* timer source */
267#define TM15MD_SRC_IOCLK 0x00 /* - IOCLK */
268#define TM15MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
269#define TM15MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
270#define TM15MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
271#define TM15MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
272#define TM15MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
273#define TM15MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
274#define TM15MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
275#define TM15MD_COUNT_ENABLE 0x80 /* timer count enable */
276#endif /* CONFIG_AM34_2 */
277
278
189#define TM4BR __SYSREG(0xd4003090, u16) /* timer 4 base register */ 279#define TM4BR __SYSREG(0xd4003090, u16) /* timer 4 base register */
190#define TM5BR __SYSREG(0xd4003092, u16) /* timer 5 base register */ 280#define TM5BR __SYSREG(0xd4003092, u16) /* timer 5 base register */
281#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
191#define TM7BR __SYSREG(0xd4003096, u16) /* timer 7 base register */ 282#define TM7BR __SYSREG(0xd4003096, u16) /* timer 7 base register */
192#define TM8BR __SYSREG(0xd4003098, u16) /* timer 8 base register */ 283#define TM8BR __SYSREG(0xd4003098, u16) /* timer 8 base register */
193#define TM9BR __SYSREG(0xd400309a, u16) /* timer 9 base register */ 284#define TM9BR __SYSREG(0xd400309a, u16) /* timer 9 base register */
285#define TM89BR __SYSREG(0xd4003098, u32) /* timer 8:9 base register */
194#define TM10BR __SYSREG(0xd400309c, u16) /* timer 10 base register */ 286#define TM10BR __SYSREG(0xd400309c, u16) /* timer 10 base register */
195#define TM11BR __SYSREG(0xd400309e, u16) /* timer 11 base register */ 287#define TM11BR __SYSREG(0xd400309e, u16) /* timer 11 base register */
196#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */ 288#if defined(CONFIG_AM34_2)
289#define TM12BR __SYSREG(0xd4003190, u16) /* timer 12 base register */
290#define TM13BR __SYSREG(0xd4003192, u16) /* timer 13 base register */
291#define TM14BR __SYSREG(0xd4003194, u16) /* timer 14 base register */
292#define TM15BR __SYSREG(0xd4003196, u16) /* timer 15 base register */
293#endif /* CONFIG_AM34_2 */
197 294
198#define TM4BC __SYSREG(0xd40030a0, u16) /* timer 4 binary counter */ 295#define TM4BC __SYSREG(0xd40030a0, u16) /* timer 4 binary counter */
199#define TM5BC __SYSREG(0xd40030a2, u16) /* timer 5 binary counter */ 296#define TM5BC __SYSREG(0xd40030a2, u16) /* timer 5 binary counter */
200#define TM45BC __SYSREG(0xd40030a0, u32) /* timer 4:5 binary counter */ 297#define TM45BC __SYSREG(0xd40030a0, u32) /* timer 4:5 binary counter */
201
202#define TM7BC __SYSREG(0xd40030a6, u16) /* timer 7 binary counter */ 298#define TM7BC __SYSREG(0xd40030a6, u16) /* timer 7 binary counter */
203#define TM8BC __SYSREG(0xd40030a8, u16) /* timer 8 binary counter */ 299#define TM8BC __SYSREG(0xd40030a8, u16) /* timer 8 binary counter */
204#define TM9BC __SYSREG(0xd40030aa, u16) /* timer 9 binary counter */ 300#define TM9BC __SYSREG(0xd40030aa, u16) /* timer 9 binary counter */
301#define TM89BC __SYSREG(0xd40030a8, u32) /* timer 8:9 binary counter */
205#define TM10BC __SYSREG(0xd40030ac, u16) /* timer 10 binary counter */ 302#define TM10BC __SYSREG(0xd40030ac, u16) /* timer 10 binary counter */
206#define TM11BC __SYSREG(0xd40030ae, u16) /* timer 11 binary counter */ 303#define TM11BC __SYSREG(0xd40030ae, u16) /* timer 11 binary counter */
304#if defined(CONFIG_AM34_2)
305#define TM12BC __SYSREG(0xd40031a0, u16) /* timer 12 binary counter */
306#define TM13BC __SYSREG(0xd40031a2, u16) /* timer 13 binary counter */
307#define TM14BC __SYSREG(0xd40031a4, u16) /* timer 14 binary counter */
308#define TM15BC __SYSREG(0xd40031a6, u16) /* timer 15 binary counter */
309#endif /* CONFIG_AM34_2 */
207 310
208#define TM4IRQ 6 /* timer 4 IRQ */ 311#define TM4IRQ 6 /* timer 4 IRQ */
209#define TM5IRQ 7 /* timer 5 IRQ */ 312#define TM5IRQ 7 /* timer 5 IRQ */
@@ -212,6 +315,12 @@
212#define TM9IRQ 13 /* timer 9 IRQ */ 315#define TM9IRQ 13 /* timer 9 IRQ */
213#define TM10IRQ 14 /* timer 10 IRQ */ 316#define TM10IRQ 14 /* timer 10 IRQ */
214#define TM11IRQ 15 /* timer 11 IRQ */ 317#define TM11IRQ 15 /* timer 11 IRQ */
318#if defined(CONFIG_AM34_2)
319#define TM12IRQ 64 /* timer 12 IRQ */
320#define TM13IRQ 65 /* timer 13 IRQ */
321#define TM14IRQ 66 /* timer 14 IRQ */
322#define TM15IRQ 67 /* timer 15 IRQ */
323#endif /* CONFIG_AM34_2 */
215 324
216#define TM4ICR GxICR(TM4IRQ) /* timer 4 uflow intr ctrl reg */ 325#define TM4ICR GxICR(TM4IRQ) /* timer 4 uflow intr ctrl reg */
217#define TM5ICR GxICR(TM5IRQ) /* timer 5 uflow intr ctrl reg */ 326#define TM5ICR GxICR(TM5IRQ) /* timer 5 uflow intr ctrl reg */
@@ -220,8 +329,16 @@
220#define TM9ICR GxICR(TM9IRQ) /* timer 9 uflow intr ctrl reg */ 329#define TM9ICR GxICR(TM9IRQ) /* timer 9 uflow intr ctrl reg */
221#define TM10ICR GxICR(TM10IRQ) /* timer 10 uflow intr ctrl reg */ 330#define TM10ICR GxICR(TM10IRQ) /* timer 10 uflow intr ctrl reg */
222#define TM11ICR GxICR(TM11IRQ) /* timer 11 uflow intr ctrl reg */ 331#define TM11ICR GxICR(TM11IRQ) /* timer 11 uflow intr ctrl reg */
223 332#if defined(CONFIG_AM34_2)
224/* 16-bit timer 6 */ 333#define TM12ICR GxICR(TM12IRQ) /* timer 12 uflow intr ctrl reg */
334#define TM13ICR GxICR(TM13IRQ) /* timer 13 uflow intr ctrl reg */
335#define TM14ICR GxICR(TM14IRQ) /* timer 14 uflow intr ctrl reg */
336#define TM15ICR GxICR(TM15IRQ) /* timer 15 uflow intr ctrl reg */
337#endif /* CONFIG_AM34_2 */
338
339/*
340 * 16-bit timer 6
341 */
225#define TM6MD __SYSREG(0xd4003084, u16) /* timer6 mode register */ 342#define TM6MD __SYSREG(0xd4003084, u16) /* timer6 mode register */
226#define TM6MD_SRC 0x0007 /* timer source */ 343#define TM6MD_SRC 0x0007 /* timer source */
227#define TM6MD_SRC_IOCLK 0x0000 /* - IOCLK */ 344#define TM6MD_SRC_IOCLK 0x0000 /* - IOCLK */
@@ -229,10 +346,14 @@
229#define TM6MD_SRC_IOCLK_32 0x0002 /* - 1/32 IOCLK */ 346#define TM6MD_SRC_IOCLK_32 0x0002 /* - 1/32 IOCLK */
230#define TM6MD_SRC_TM0UFLOW 0x0004 /* - timer 0 underflow */ 347#define TM6MD_SRC_TM0UFLOW 0x0004 /* - timer 0 underflow */
231#define TM6MD_SRC_TM1UFLOW 0x0005 /* - timer 1 underflow */ 348#define TM6MD_SRC_TM1UFLOW 0x0005 /* - timer 1 underflow */
232#define TM6MD_SRC_TM6IOB_BOTH 0x0006 /* - TM6IOB pin input (both edges) */ 349#define TM6MD_SRC_TM2UFLOW 0x0006 /* - timer 2 underflow */
350#if defined(CONFIG_AM33_2)
351/* #define TM6MD_SRC_TM6IOB_BOTH 0x0006 */ /* - TM6IOB pin input (both edges) */
233#define TM6MD_SRC_TM6IOB_SINGLE 0x0007 /* - TM6IOB pin input (single edge) */ 352#define TM6MD_SRC_TM6IOB_SINGLE 0x0007 /* - TM6IOB pin input (single edge) */
234#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */ 353#endif /* CONFIG_AM33_2 */
235#define TM6MD_ONESHOT_ENABLE 0x0040 /* oneshot count */ 354#define TM6MD_ONESHOT_ENABLE 0x0040 /* oneshot count */
355#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
356#if defined(CONFIG_AM33_2)
236#define TM6MD_TRIG_ENABLE 0x0080 /* TM6IOB pin trigger enable */ 357#define TM6MD_TRIG_ENABLE 0x0080 /* TM6IOB pin trigger enable */
237#define TM6MD_PWM 0x3800 /* PWM output mode */ 358#define TM6MD_PWM 0x3800 /* PWM output mode */
238#define TM6MD_PWM_DIS 0x0000 /* - disabled */ 359#define TM6MD_PWM_DIS 0x0000 /* - disabled */
@@ -240,10 +361,15 @@
240#define TM6MD_PWM_11BIT 0x1800 /* - 11 bits mode */ 361#define TM6MD_PWM_11BIT 0x1800 /* - 11 bits mode */
241#define TM6MD_PWM_12BIT 0x3000 /* - 12 bits mode */ 362#define TM6MD_PWM_12BIT 0x3000 /* - 12 bits mode */
242#define TM6MD_PWM_14BIT 0x3800 /* - 14 bits mode */ 363#define TM6MD_PWM_14BIT 0x3800 /* - 14 bits mode */
364#endif /* CONFIG_AM33_2 */
365
243#define TM6MD_INIT_COUNTER 0x4000 /* initialize TMnBC to zero */ 366#define TM6MD_INIT_COUNTER 0x4000 /* initialize TMnBC to zero */
244#define TM6MD_COUNT_ENABLE 0x8000 /* timer count enable */ 367#define TM6MD_COUNT_ENABLE 0x8000 /* timer count enable */
245 368
246#define TM6MDA __SYSREG(0xd40030b4, u8) /* timer6 cmp/cap A mode reg */ 369#define TM6MDA __SYSREG(0xd40030b4, u8) /* timer6 cmp/cap A mode reg */
370#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
371#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
372#if defined(CONFIG_AM33_2)
247#define TM6MDA_OUT 0x07 /* output select */ 373#define TM6MDA_OUT 0x07 /* output select */
248#define TM6MDA_OUT_SETA_RESETB 0x00 /* - set at match A, reset at match B */ 374#define TM6MDA_OUT_SETA_RESETB 0x00 /* - set at match A, reset at match B */
249#define TM6MDA_OUT_SETA_RESETOV 0x01 /* - set at match A, reset at overflow */ 375#define TM6MDA_OUT_SETA_RESETOV 0x01 /* - set at match A, reset at overflow */
@@ -251,30 +377,35 @@
251#define TM6MDA_OUT_RESETA 0x03 /* - reset at match A */ 377#define TM6MDA_OUT_RESETA 0x03 /* - reset at match A */
252#define TM6MDA_OUT_TOGGLE 0x04 /* - toggle on match A */ 378#define TM6MDA_OUT_TOGGLE 0x04 /* - toggle on match A */
253#define TM6MDA_MODE 0xc0 /* compare A register mode */ 379#define TM6MDA_MODE 0xc0 /* compare A register mode */
254#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
255#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
256#define TM6MDA_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */ 380#define TM6MDA_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
257#define TM6MDA_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */ 381#define TM6MDA_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
258#define TM6MDA_EDGE 0x20 /* compare A edge select */ 382#define TM6MDA_EDGE 0x20 /* compare A edge select */
259#define TM6MDA_EDGE_FALLING 0x00 /* capture on falling edge */ 383#define TM6MDA_EDGE_FALLING 0x00 /* capture on falling edge */
260#define TM6MDA_EDGE_RISING 0x20 /* capture on rising edge */ 384#define TM6MDA_EDGE_RISING 0x20 /* capture on rising edge */
261#define TM6MDA_CAPTURE_ENABLE 0x10 /* capture enable */ 385#define TM6MDA_CAPTURE_ENABLE 0x10 /* capture enable */
386#else /* !CONFIG_AM33_2 */
387#define TM6MDA_MODE 0x40 /* compare A register mode */
388#endif /* CONFIG_AM33_2 */
262 389
263#define TM6MDB __SYSREG(0xd40030b5, u8) /* timer6 cmp/cap B mode reg */ 390#define TM6MDB __SYSREG(0xd40030b5, u8) /* timer6 cmp/cap B mode reg */
391#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
392#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
393#if defined(CONFIG_AM33_2)
264#define TM6MDB_OUT 0x07 /* output select */ 394#define TM6MDB_OUT 0x07 /* output select */
265#define TM6MDB_OUT_SETB_RESETA 0x00 /* - set at match B, reset at match A */ 395#define TM6MDB_OUT_SETB_RESETA 0x00 /* - set at match B, reset at match A */
266#define TM6MDB_OUT_SETB_RESETOV 0x01 /* - set at match B */ 396#define TM6MDB_OUT_SETB_RESETOV 0x01 /* - set at match B */
267#define TM6MDB_OUT_RESETB 0x03 /* - reset at match B */ 397#define TM6MDB_OUT_RESETB 0x03 /* - reset at match B */
268#define TM6MDB_OUT_TOGGLE 0x04 /* - toggle on match B */ 398#define TM6MDB_OUT_TOGGLE 0x04 /* - toggle on match B */
269#define TM6MDB_MODE 0xc0 /* compare B register mode */ 399#define TM6MDB_MODE 0xc0 /* compare B register mode */
270#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
271#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
272#define TM6MDB_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */ 400#define TM6MDB_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
273#define TM6MDB_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */ 401#define TM6MDB_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
274#define TM6MDB_EDGE 0x20 /* compare B edge select */ 402#define TM6MDB_EDGE 0x20 /* compare B edge select */
275#define TM6MDB_EDGE_FALLING 0x00 /* capture on falling edge */ 403#define TM6MDB_EDGE_FALLING 0x00 /* capture on falling edge */
276#define TM6MDB_EDGE_RISING 0x20 /* capture on rising edge */ 404#define TM6MDB_EDGE_RISING 0x20 /* capture on rising edge */
277#define TM6MDB_CAPTURE_ENABLE 0x10 /* capture enable */ 405#define TM6MDB_CAPTURE_ENABLE 0x10 /* capture enable */
406#else /* !CONFIG_AM33_2 */
407#define TM6MDB_MODE 0x40 /* compare B register mode */
408#endif /* CONFIG_AM33_2 */
278 409
279#define TM6CA __SYSREG(0xd40030c4, u16) /* timer6 cmp/capture reg A */ 410#define TM6CA __SYSREG(0xd40030c4, u16) /* timer6 cmp/capture reg A */
280#define TM6CB __SYSREG(0xd40030d4, u16) /* timer6 cmp/capture reg B */ 411#define TM6CB __SYSREG(0xd40030d4, u16) /* timer6 cmp/capture reg B */
@@ -288,6 +419,34 @@
288#define TM6AICR GxICR(TM6AIRQ) /* timer 6A intr control reg */ 419#define TM6AICR GxICR(TM6AIRQ) /* timer 6A intr control reg */
289#define TM6BICR GxICR(TM6BIRQ) /* timer 6B intr control reg */ 420#define TM6BICR GxICR(TM6BIRQ) /* timer 6B intr control reg */
290 421
422#if defined(CONFIG_AM34_2)
423/*
424 * MTM: OS Tick-Timer
425 */
426#define TMTMD __SYSREG(0xd4004100, u8) /* Tick Timer mode register */
427#define TMTMD_TMTLDE 0x40 /* initialize TMTBC = TMTBR */
428#define TMTMD_TMTCNE 0x80 /* timer count enable */
429
 430#define TMTBR __SYSREG(0xd4004110, u32) /* Tick Timer base register */
 431#define TMTBC __SYSREG(0xd4004120, u32) /* Tick Timer binary counter */
432
433/*
434 * MTM: OS Timestamp-Timer
435 */
 436#define TMSMD __SYSREG(0xd4004140, u8) /* Timestamp Timer mode register */
437#define TMSMD_TMSLDE 0x40 /* initialize TMSBC = TMSBR */
438#define TMSMD_TMSCNE 0x80 /* timer count enable */
439
 440#define TMSBR __SYSREG(0xd4004150, u32) /* Timestamp Timer base register */
 441#define TMSBC __SYSREG(0xd4004160, u32) /* Timestamp Timer binary counter */
442
443#define TMTIRQ 119 /* OS Tick timer IRQ */
444#define TMSIRQ 120 /* Timestamp timer IRQ */
445
446#define TMTICR GxICR(TMTIRQ) /* OS Tick timer uflow intr ctrl reg */
447#define TMSICR GxICR(TMSIRQ) /* Timestamp timer uflow intr ctrl reg */
448#endif /* CONFIG_AM34_2 */
449
291#endif /* __KERNEL__ */ 450#endif /* __KERNEL__ */
292 451
293#endif /* _ASM_TIMER_REGS_H */ 452#endif /* _ASM_TIMER_REGS_H */
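
As a rough illustration of how the TMnMD mode bytes above are put together, the stand-alone program below composes a value for one 16-bit timer from copies of the relevant constants. The two-step load/start sequence is a common way of driving reload timers and is shown only as an example; it is not taken from the mn10300 timer code, and nothing is written to the real TM4MD register here.

#include <stdio.h>

/* Copied from the header so the example builds on its own. */
#define TM4MD_SRC_IOCLK_8	0x01	/* clock source: IOCLK / 8 */
#define TM4MD_INIT_COUNTER	0x40	/* reload TMnBC from TMnBR */
#define TM4MD_COUNT_ENABLE	0x80	/* start counting */

int main(void)
{
	unsigned char md;

	/* 1) select the source and latch the reload value */
	md = TM4MD_SRC_IOCLK_8 | TM4MD_INIT_COUNTER;
	printf("load phase:  TM4MD = 0x%02x\n", md);

	/* 2) drop INIT and start the down-counter */
	md = TM4MD_SRC_IOCLK_8 | TM4MD_COUNT_ENABLE;
	printf("count phase: TM4MD = 0x%02x\n", md);
	return 0;
}
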
diff --git a/arch/mn10300/include/asm/timex.h b/arch/mn10300/include/asm/timex.h
index 8d031f9e117d..bd4e90dfe6c2 100644
--- a/arch/mn10300/include/asm/timex.h
+++ b/arch/mn10300/include/asm/timex.h
@@ -16,18 +16,30 @@
16 16
17#define TICK_SIZE (tick_nsec / 1000) 17#define TICK_SIZE (tick_nsec / 1000)
18 18
19#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set 19#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
20 * to something appropriate, but what? */
21
22extern cycles_t cacheflush_time;
23 20
24#ifdef __KERNEL__ 21#ifdef __KERNEL__
25 22
23extern cycles_t cacheflush_time;
24
26static inline cycles_t get_cycles(void) 25static inline cycles_t get_cycles(void)
27{ 26{
28 return read_timestamp_counter(); 27 return read_timestamp_counter();
29} 28}
30 29
30extern int init_clockevents(void);
31extern int init_clocksource(void);
32
33static inline void setup_jiffies_interrupt(int irq,
34 struct irqaction *action)
35{
36 u16 tmp;
37 setup_irq(irq, action);
38 set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
39 GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
40 tmp = GxICR(irq);
41}
42
31#endif /* __KERNEL__ */ 43#endif /* __KERNEL__ */
32 44
33#endif /* _ASM_TIMEX_H */ 45#endif /* _ASM_TIMEX_H */
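
The trailing "tmp = GxICR(irq)" in setup_jiffies_interrupt() reads the interrupt control register back after modifying it, a pattern commonly used to make sure a device register write has taken effect before the function returns. The stand-alone sketch below shows that idiom with an ordinary volatile variable standing in for GxICR(irq); the ICR_* constants are placeholders, not the real GxICR bit definitions.

#include <stdio.h>

static volatile unsigned short fake_icr;	/* stand-in for GxICR(irq) */

#define ICR_ENABLE	0x0100			/* placeholder bit values */
#define ICR_REQUEST	0x0001

static void enable_fake_irq(void)
{
	unsigned short tmp;

	fake_icr |= ICR_ENABLE | ICR_REQUEST;	/* write the control register */
	tmp = fake_icr;				/* read back to flush the write */
	(void)tmp;
}

int main(void)
{
	enable_fake_irq();
	printf("ICR now 0x%04x\n", (unsigned)fake_icr);
	return 0;
}
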
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e29281c5d..efddd6e1adea 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -11,24 +11,78 @@
11#ifndef _ASM_TLBFLUSH_H 11#ifndef _ASM_TLBFLUSH_H
12#define _ASM_TLBFLUSH_H 12#define _ASM_TLBFLUSH_H
13 13
14#include <linux/mm.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
15 16
16#define __flush_tlb() \ 17struct tlb_state {
17do { \ 18 struct mm_struct *active_mm;
18 int w; \ 19 int state;
19 __asm__ __volatile__ \ 20};
20 (" mov %1,%0 \n" \ 21DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
21 " or %2,%0 \n" \
22 " mov %0,%1 \n" \
23 : "=d"(w) \
24 : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
25 : "cc", "memory" \
26 ); \
27} while (0)
28 22
29#define __flush_tlb_all() __flush_tlb() 23/**
30#define __flush_tlb_one(addr) __flush_tlb() 24 * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
25 */
26static inline void local_flush_tlb(void)
27{
28 int w;
29 asm volatile(
30 " mov %1,%0 \n"
31 " or %2,%0 \n"
32 " mov %0,%1 \n"
33 : "=d"(w)
34 : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
35 : "cc", "memory");
36}
37
38/**
39 * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
40 */
41static inline void local_flush_tlb_all(void)
42{
43 local_flush_tlb();
44}
31 45
46/**
47 * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
48 */
49static inline void local_flush_tlb_one(unsigned long addr)
50{
51 local_flush_tlb();
52}
53
54/**
55 * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
56 * @mm: The MM to flush for
57 * @addr: The address of the target page in RAM (not its page struct)
58 */
59static inline
60void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
61{
62 unsigned long pteu, flags, cnx;
63
64 addr &= PAGE_MASK;
65
66 local_irq_save(flags);
67
68 cnx = 1;
69#ifdef CONFIG_MN10300_TLB_USE_PIDR
70 cnx = mm->context.tlbpid[smp_processor_id()];
71#endif
72 if (cnx) {
73 pteu = addr;
74#ifdef CONFIG_MN10300_TLB_USE_PIDR
75 pteu |= cnx & xPTEU_PID;
76#endif
77 IPTEU = pteu;
78 DPTEU = pteu;
79 if (IPTEL & xPTEL_V)
80 IPTEL = 0;
81 if (DPTEL & xPTEL_V)
82 DPTEL = 0;
83 }
84 local_irq_restore(flags);
85}
32 86
33/* 87/*
34 * TLB flushing: 88 * TLB flushing:
@@ -40,41 +94,61 @@ do { \
40 * - flush_tlb_range(mm, start, end) flushes a range of pages 94 * - flush_tlb_range(mm, start, end) flushes a range of pages
41 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables 95 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
42 */ 96 */
43#define flush_tlb_all() \ 97#ifdef CONFIG_SMP
44do { \ 98
45 preempt_disable(); \ 99#include <asm/smp.h>
46 __flush_tlb_all(); \ 100
47 preempt_enable(); \ 101extern void flush_tlb_all(void);
48} while (0) 102extern void flush_tlb_current_task(void);
49 103extern void flush_tlb_mm(struct mm_struct *);
50#define flush_tlb_mm(mm) \ 104extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
51do { \ 105
52 preempt_disable(); \ 106#define flush_tlb() flush_tlb_current_task()
53 __flush_tlb_all(); \ 107
54 preempt_enable(); \ 108static inline void flush_tlb_range(struct vm_area_struct *vma,
55} while (0) 109 unsigned long start, unsigned long end)
56 110{
57#define flush_tlb_range(vma, start, end) \ 111 flush_tlb_mm(vma->vm_mm);
58do { \ 112}
59 unsigned long __s __attribute__((unused)) = (start); \ 113
60 unsigned long __e __attribute__((unused)) = (end); \ 114#else /* CONFIG_SMP */
61 preempt_disable(); \ 115
62 __flush_tlb_all(); \ 116static inline void flush_tlb_all(void)
63 preempt_enable(); \ 117{
64} while (0) 118 preempt_disable();
65 119 local_flush_tlb_all();
66 120 preempt_enable();
67#define __flush_tlb_global() flush_tlb_all() 121}
68#define flush_tlb() flush_tlb_all() 122
69#define flush_tlb_kernel_range(start, end) \ 123static inline void flush_tlb_mm(struct mm_struct *mm)
70do { \ 124{
71 unsigned long __s __attribute__((unused)) = (start); \ 125 preempt_disable();
72 unsigned long __e __attribute__((unused)) = (end); \ 126 local_flush_tlb_all();
73 flush_tlb_all(); \ 127 preempt_enable();
74} while (0) 128}
75 129
76extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); 130static inline void flush_tlb_range(struct vm_area_struct *vma,
77 131 unsigned long start, unsigned long end)
78#define flush_tlb_pgtables(mm, start, end) do {} while (0) 132{
133 preempt_disable();
134 local_flush_tlb_all();
135 preempt_enable();
136}
137
138#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
139#define flush_tlb() flush_tlb_all()
140
141#endif /* CONFIG_SMP */
142
143static inline void flush_tlb_kernel_range(unsigned long start,
144 unsigned long end)
145{
146 flush_tlb_all();
147}
148
149static inline void flush_tlb_pgtables(struct mm_struct *mm,
150 unsigned long start, unsigned long end)
151{
152}
79 153
80#endif /* _ASM_TLBFLUSH_H */ 154#endif /* _ASM_TLBFLUSH_H */
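
The rework above splits TLB flushing into local_* primitives plus either inline UP wrappers or extern SMP implementations. The toy model below only illustrates that split in ordinary C: flush_tlb_mm_model() either flushes locally or, in the simulated SMP case, also notifies the other CPUs. smp_send_flush_ipi() is a made-up stand-in; the real SMP versions are the extern functions declared in the header and defined elsewhere in arch/mn10300, and no real TLB is touched here.

#include <stdio.h>

#define SIM_SMP 1			/* flip to 0 to model the UP path */

static void local_flush_tlb_all(void)
{
	printf("  local TLB flushed on this CPU\n");
}

static void smp_send_flush_ipi(void)
{
	printf("  IPI sent: other CPUs flush their TLBs too\n");
}

static void flush_tlb_mm_model(void)
{
#if SIM_SMP
	local_flush_tlb_all();
	smp_send_flush_ipi();
#else
	/* UP: nothing to tell other CPUs, just flush locally */
	local_flush_tlb_all();
#endif
}

int main(void)
{
	printf("flush_tlb_mm():\n");
	flush_tlb_mm_model();
	return 0;
}
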
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 197a7af3dd8a..679dee0bbd08 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -14,9 +14,8 @@
14/* 14/*
15 * User space memory access functions 15 * User space memory access functions
16 */ 16 */
17#include <linux/sched.h> 17#include <linux/thread_info.h>
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/errno.h> 19#include <asm/errno.h>
21 20
22#define VERIFY_READ 0 21#define VERIFY_READ 0
@@ -29,7 +28,6 @@
29 * 28 *
30 * For historical reasons, these macros are grossly misnamed. 29 * For historical reasons, these macros are grossly misnamed.
31 */ 30 */
32
33#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 31#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
34 32
35#define KERNEL_XDS MAKE_MM_SEG(0xBFFFFFFF) 33#define KERNEL_XDS MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
377 375
378 376
379#if 0 377#if 0
380#error don't use - these macros don't increment to & from pointers 378#error "don't use - these macros don't increment to & from pointers"
381/* Optimize just a little bit when we know the size of the move. */ 379/* Optimize just a little bit when we know the size of the move. */
382#define __constant_copy_user(to, from, size) \ 380#define __constant_copy_user(to, from, size) \
383do { \ 381do { \