path: root/include/asm-sh
Diffstat (limited to 'include/asm-sh')
-rw-r--r--  include/asm-sh/Kbuild | 5
-rw-r--r--  include/asm-sh/addrspace.h | 43
-rw-r--r--  include/asm-sh/atomic-grb.h | 169
-rw-r--r--  include/asm-sh/atomic.h | 10
-rw-r--r--  include/asm-sh/auxvec.h | 18
-rw-r--r--  include/asm-sh/bitops-grb.h | 169
-rw-r--r--  include/asm-sh/bitops-irq.h | 91
-rw-r--r--  include/asm-sh/bitops.h | 117
-rw-r--r--  include/asm-sh/bug.h | 9
-rw-r--r--  include/asm-sh/bugs.h | 17
-rw-r--r--  include/asm-sh/byteorder.h | 45
-rw-r--r--  include/asm-sh/cache.h | 5
-rw-r--r--  include/asm-sh/checksum.h | 216
-rw-r--r--  include/asm-sh/checksum_32.h | 215
-rw-r--r--  include/asm-sh/checksum_64.h | 78
-rw-r--r--  include/asm-sh/cmpxchg-grb.h | 70
-rw-r--r--  include/asm-sh/cmpxchg-irq.h | 40
-rw-r--r--  include/asm-sh/cpu-sh2/addrspace.h | 7
-rw-r--r--  include/asm-sh/cpu-sh2/cache.h | 8
-rw-r--r--  include/asm-sh/cpu-sh2/rtc.h | 8
-rw-r--r--  include/asm-sh/cpu-sh2a/addrspace.h | 11
-rw-r--r--  include/asm-sh/cpu-sh2a/cache.h | 11
-rw-r--r--  include/asm-sh/cpu-sh2a/freq.h | 2
-rw-r--r--  include/asm-sh/cpu-sh2a/rtc.h | 8
-rw-r--r--  include/asm-sh/cpu-sh3/addrspace.h | 7
-rw-r--r--  include/asm-sh/cpu-sh3/cache.h | 8
-rw-r--r--  include/asm-sh/cpu-sh3/dma.h | 4
-rw-r--r--  include/asm-sh/cpu-sh3/freq.h | 5
-rw-r--r--  include/asm-sh/cpu-sh3/gpio.h | 3
-rw-r--r--  include/asm-sh/cpu-sh3/mmu_context.h | 3
-rw-r--r--  include/asm-sh/cpu-sh3/rtc.h | 8
-rw-r--r--  include/asm-sh/cpu-sh3/timer.h | 7
-rw-r--r--  include/asm-sh/cpu-sh3/ubc.h | 3
-rw-r--r--  include/asm-sh/cpu-sh4/addrspace.h | 6
-rw-r--r--  include/asm-sh/cpu-sh4/cache.h | 5
-rw-r--r--  include/asm-sh/cpu-sh4/fpu.h | 32
-rw-r--r--  include/asm-sh/cpu-sh4/freq.h | 3
-rw-r--r--  include/asm-sh/cpu-sh4/mmu_context.h | 10
-rw-r--r--  include/asm-sh/cpu-sh4/rtc.h | 8
-rw-r--r--  include/asm-sh/cpu-sh5/addrspace.h | 11
-rw-r--r--  include/asm-sh/cpu-sh5/cache.h | 97
-rw-r--r--  include/asm-sh/cpu-sh5/cacheflush.h | 35
-rw-r--r--  include/asm-sh/cpu-sh5/dma.h | 6
-rw-r--r--  include/asm-sh/cpu-sh5/irq.h | 117
-rw-r--r--  include/asm-sh/cpu-sh5/mmu_context.h | 27
-rw-r--r--  include/asm-sh/cpu-sh5/registers.h | 106
-rw-r--r--  include/asm-sh/cpu-sh5/rtc.h | 8
-rw-r--r--  include/asm-sh/cpu-sh5/timer.h | 4
-rw-r--r--  include/asm-sh/delay.h | 8
-rw-r--r--  include/asm-sh/dma-mapping.h | 56
-rw-r--r--  include/asm-sh/elf.h | 75
-rw-r--r--  include/asm-sh/fixmap.h | 5
-rw-r--r--  include/asm-sh/flat.h | 2
-rw-r--r--  include/asm-sh/fpu.h | 46
-rw-r--r--  include/asm-sh/hd64461.h | 28
-rw-r--r--  include/asm-sh/hs7751rvoip.h | 54
-rw-r--r--  include/asm-sh/hw_irq.h | 12
-rw-r--r--  include/asm-sh/io.h | 68
-rw-r--r--  include/asm-sh/irqflags.h | 97
-rw-r--r--  include/asm-sh/irqflags_32.h | 99
-rw-r--r--  include/asm-sh/irqflags_64.h | 85
-rw-r--r--  include/asm-sh/machvec.h | 3
-rw-r--r--  include/asm-sh/microdev.h | 4
-rw-r--r--  include/asm-sh/mmu_context.h | 87
-rw-r--r--  include/asm-sh/mmu_context_32.h | 47
-rw-r--r--  include/asm-sh/mmu_context_64.h | 75
-rw-r--r--  include/asm-sh/module.h | 4
-rw-r--r--  include/asm-sh/page.h | 102
-rw-r--r--  include/asm-sh/param.h | 6
-rw-r--r--  include/asm-sh/pci.h | 5
-rw-r--r--  include/asm-sh/pgtable.h | 518
-rw-r--r--  include/asm-sh/pgtable_32.h | 474
-rw-r--r--  include/asm-sh/pgtable_64.h | 299
-rw-r--r--  include/asm-sh/posix_types.h | 129
-rw-r--r--  include/asm-sh/posix_types_32.h | 122
-rw-r--r--  include/asm-sh/posix_types_64.h | 131
-rw-r--r--  include/asm-sh/processor.h | 260
-rw-r--r--  include/asm-sh/processor_32.h | 215
-rw-r--r--  include/asm-sh/processor_64.h | 275
-rw-r--r--  include/asm-sh/ptrace.h | 28
-rw-r--r--  include/asm-sh/r7780rp.h | 33
-rw-r--r--  include/asm-sh/rtc.h | 2
-rw-r--r--  include/asm-sh/scatterlist.h | 2
-rw-r--r--  include/asm-sh/sdk7780.h | 81
-rw-r--r--  include/asm-sh/sections.h | 1
-rw-r--r--  include/asm-sh/sigcontext.h | 16
-rw-r--r--  include/asm-sh/spi.h | 13
-rw-r--r--  include/asm-sh/stat.h | 63
-rw-r--r--  include/asm-sh/string.h | 136
-rw-r--r--  include/asm-sh/string_32.h | 131
-rw-r--r--  include/asm-sh/string_64.h | 17
-rw-r--r--  include/asm-sh/system.h | 173
-rw-r--r--  include/asm-sh/system_32.h | 99
-rw-r--r--  include/asm-sh/system_64.h | 40
-rw-r--r--  include/asm-sh/thread_info.h | 8
-rw-r--r--  include/asm-sh/tlb.h | 10
-rw-r--r--  include/asm-sh/tlb_64.h | 69
-rw-r--r--  include/asm-sh/types.h | 6
-rw-r--r--  include/asm-sh/uaccess.h | 564
-rw-r--r--  include/asm-sh/uaccess_32.h | 510
-rw-r--r--  include/asm-sh/uaccess_64.h | 302
-rw-r--r--  include/asm-sh/unistd.h | 379
-rw-r--r--  include/asm-sh/unistd_32.h | 376
-rw-r--r--  include/asm-sh/unistd_64.h | 415
-rw-r--r--  include/asm-sh/user.h | 7
-rw-r--r--  include/asm-sh/voyagergx.h | 341
106 files changed, 5951 insertions, 3157 deletions
diff --git a/include/asm-sh/Kbuild b/include/asm-sh/Kbuild
index 76a8ccf254a5..43910cdf78a5 100644
--- a/include/asm-sh/Kbuild
+++ b/include/asm-sh/Kbuild
@@ -1,3 +1,8 @@
 include include/asm-generic/Kbuild.asm
 
 header-y += cpu-features.h
+
+unifdef-y += unistd_32.h
+unifdef-y += unistd_64.h
+unifdef-y += posix_types_32.h
+unifdef-y += posix_types_64.h
diff --git a/include/asm-sh/addrspace.h b/include/asm-sh/addrspace.h
index b860218e402e..fa544fc38c23 100644
--- a/include/asm-sh/addrspace.h
+++ b/include/asm-sh/addrspace.h
@@ -9,24 +9,21 @@
  */
 #ifndef __ASM_SH_ADDRSPACE_H
 #define __ASM_SH_ADDRSPACE_H
+
 #ifdef __KERNEL__
 
 #include <asm/cpu/addrspace.h>
 
-/* Memory segments (32bit Privileged mode addresses) */
-#ifndef CONFIG_CPU_SH2A
-#define P0SEG		0x00000000
-#define P1SEG		0x80000000
-#define P2SEG		0xa0000000
-#define P3SEG		0xc0000000
-#define P4SEG		0xe0000000
-#else
-#define P0SEG		0x00000000
-#define P1SEG		0x00000000
-#define P2SEG		0x20000000
-#define P3SEG		0x00000000
-#define P4SEG		0x80000000
-#endif
+/* If this CPU supports segmentation, hook up the helpers */
+#ifdef P1SEG
+
+/*
+   [ P0/U0 (virtual) ]		0x00000000     <------ User space
+   [ P1 (fixed)   cached ]	0x80000000     <------ Kernel space
+   [ P2 (fixed)   non-cachable]	0xA0000000     <------ Physical access
+   [ P3 (virtual) cached]	0xC0000000     <------ vmalloced area
+   [ P4 control   ]		0xE0000000
+ */
 
 /* Returns the privileged segment base of a given address */
 #define PXSEG(a)	(((unsigned long)(a)) & 0xe0000000)
@@ -34,13 +31,23 @@
 /* Returns the physical address of a PnSEG (n=1,2) address */
 #define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
 
+#ifdef CONFIG_29BIT
 /*
  * Map an address to a certain privileged segment
  */
-#define P1SEGADDR(a)	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P1SEG))
-#define P2SEGADDR(a)	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P2SEG))
-#define P3SEGADDR(a)	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
-#define P4SEGADDR(a)	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
+#define P1SEGADDR(a)	\
+	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P1SEG))
+#define P2SEGADDR(a)	\
+	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P2SEG))
+#define P3SEGADDR(a)	\
+	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
+#define P4SEGADDR(a)	\
+	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
+#endif /* 29BIT */
+#endif /* P1SEG */
+
+/* Check if an address can be reached in 29 bits */
+#define IS_29BIT(a)	(((unsigned long)(a)) < 0x20000000)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_ADDRSPACE_H */
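Note on the hunk above: the PnSEG aliasing macros only exist on parts that expose the classic 29-bit segmented map, hence the new CONFIG_29BIT and P1SEG guards. A minimal sketch of how callers typically use them, assuming a 29-bit kernel; the helper name and buffer are made up for illustration and are not part of this patch:

    /* Sketch only: derive the uncached (P2) alias of a kernel buffer that
     * sits below the 29-bit boundary.  Same PHYSADDR(), different segment. */
    static unsigned long buf_to_uncached(void *buf)
    {
    	if (!IS_29BIT(buf))		/* no PnSEG alias exists for this address */
    		return 0;
    	return (unsigned long)P2SEGADDR(buf);
    }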
diff --git a/include/asm-sh/atomic-grb.h b/include/asm-sh/atomic-grb.h
new file mode 100644
index 000000000000..4c5b7dbfcedb
--- /dev/null
+++ b/include/asm-sh/atomic-grb.h
@@ -0,0 +1,169 @@
1#ifndef __ASM_SH_ATOMIC_GRB_H
2#define __ASM_SH_ATOMIC_GRB_H
3
4static inline void atomic_add(int i, atomic_t *v)
5{
6 int tmp;
7
8 __asm__ __volatile__ (
9 " .align 2 \n\t"
10 " mova 1f, r0 \n\t" /* r0 = end point */
11 " mov r15, r1 \n\t" /* r1 = saved sp */
12 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
13 " mov.l @%1, %0 \n\t" /* load old value */
14 " add %2, %0 \n\t" /* add */
15 " mov.l %0, @%1 \n\t" /* store new value */
16 "1: mov r1, r15 \n\t" /* LOGOUT */
17 : "=&r" (tmp),
18 "+r" (v)
19 : "r" (i)
20 : "memory" , "r0", "r1");
21}
22
23static inline void atomic_sub(int i, atomic_t *v)
24{
25 int tmp;
26
27 __asm__ __volatile__ (
28 " .align 2 \n\t"
29 " mova 1f, r0 \n\t" /* r0 = end point */
30 " mov r15, r1 \n\t" /* r1 = saved sp */
31 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
32 " mov.l @%1, %0 \n\t" /* load old value */
33 " sub %2, %0 \n\t" /* sub */
34 " mov.l %0, @%1 \n\t" /* store new value */
35 "1: mov r1, r15 \n\t" /* LOGOUT */
36 : "=&r" (tmp),
37 "+r" (v)
38 : "r" (i)
39 : "memory" , "r0", "r1");
40}
41
42static inline int atomic_add_return(int i, atomic_t *v)
43{
44 int tmp;
45
46 __asm__ __volatile__ (
47 " .align 2 \n\t"
48 " mova 1f, r0 \n\t" /* r0 = end point */
49 " mov r15, r1 \n\t" /* r1 = saved sp */
50 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
51 " mov.l @%1, %0 \n\t" /* load old value */
52 " add %2, %0 \n\t" /* add */
53 " mov.l %0, @%1 \n\t" /* store new value */
54 "1: mov r1, r15 \n\t" /* LOGOUT */
55 : "=&r" (tmp),
56 "+r" (v)
57 : "r" (i)
58 : "memory" , "r0", "r1");
59
60 return tmp;
61}
62
63static inline int atomic_sub_return(int i, atomic_t *v)
64{
65 int tmp;
66
67 __asm__ __volatile__ (
68 " .align 2 \n\t"
69 " mova 1f, r0 \n\t" /* r0 = end point */
70 " mov r15, r1 \n\t" /* r1 = saved sp */
71 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
72 " mov.l @%1, %0 \n\t" /* load old value */
73 " sub %2, %0 \n\t" /* sub */
74 " mov.l %0, @%1 \n\t" /* store new value */
75 "1: mov r1, r15 \n\t" /* LOGOUT */
76 : "=&r" (tmp),
77 "+r" (v)
78 : "r" (i)
79 : "memory", "r0", "r1");
80
81 return tmp;
82}
83
84static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
85{
86 int tmp;
87 unsigned int _mask = ~mask;
88
89 __asm__ __volatile__ (
90 " .align 2 \n\t"
91 " mova 1f, r0 \n\t" /* r0 = end point */
92 " mov r15, r1 \n\t" /* r1 = saved sp */
93 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
94 " mov.l @%1, %0 \n\t" /* load old value */
95 " and %2, %0 \n\t" /* add */
96 " mov.l %0, @%1 \n\t" /* store new value */
97 "1: mov r1, r15 \n\t" /* LOGOUT */
98 : "=&r" (tmp),
99 "+r" (v)
100 : "r" (_mask)
101 : "memory" , "r0", "r1");
102}
103
104static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
105{
106 int tmp;
107
108 __asm__ __volatile__ (
109 " .align 2 \n\t"
110 " mova 1f, r0 \n\t" /* r0 = end point */
111 " mov r15, r1 \n\t" /* r1 = saved sp */
112 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
113 " mov.l @%1, %0 \n\t" /* load old value */
114 " or %2, %0 \n\t" /* or */
115 " mov.l %0, @%1 \n\t" /* store new value */
116 "1: mov r1, r15 \n\t" /* LOGOUT */
117 : "=&r" (tmp),
118 "+r" (v)
119 : "r" (mask)
120 : "memory" , "r0", "r1");
121}
122
123static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
124{
125 int ret;
126
127 __asm__ __volatile__ (
128 " .align 2 \n\t"
129 " mova 1f, r0 \n\t"
130 " nop \n\t"
131 " mov r15, r1 \n\t"
132 " mov #-8, r15 \n\t"
133 " mov.l @%1, %0 \n\t"
134 " cmp/eq %2, %0 \n\t"
135 " bf 1f \n\t"
136 " mov.l %3, @%1 \n\t"
137 "1: mov r1, r15 \n\t"
138 : "=&r" (ret)
139 : "r" (v), "r" (old), "r" (new)
140 : "memory" , "r0", "r1" , "t");
141
142 return ret;
143}
144
145static inline int atomic_add_unless(atomic_t *v, int a, int u)
146{
147 int ret;
148 unsigned long tmp;
149
150 __asm__ __volatile__ (
151 " .align 2 \n\t"
152 " mova 1f, r0 \n\t"
153 " nop \n\t"
154 " mov r15, r1 \n\t"
155 " mov #-12, r15 \n\t"
156 " mov.l @%2, %1 \n\t"
157 " mov %1, %0 \n\t"
158 " cmp/eq %4, %0 \n\t"
159 " bt/s 1f \n\t"
160 " add %3, %1 \n\t"
161 " mov.l %1, @%2 \n\t"
162 "1: mov r1, r15 \n\t"
163 : "=&r" (ret), "=&r" (tmp)
164 : "r" (v), "r" (a), "r" (u)
165 : "memory" , "r0", "r1" , "t");
166
167 return ret != u;
168}
169#endif /* __ASM_SH_ATOMIC_GRB_H */
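For readers skimming the new file above: these sequences follow the SH "gUSA" convention for lockless atomicity on uniprocessor parts. r0 is loaded with the end address of the critical region and r15 with a small negative byte count while the region is live; if the task is interrupted before the final store, the exception return path rolls the PC back to the start of the region, so the load-modify-store appears atomic. A rough C-level equivalent of atomic_add() under that assumption (illustrative only, the rollback is exactly what the asm markers provide):

    static inline void atomic_add_sketch(int i, atomic_t *v)
    {
    	int tmp;
    	/* --- restartable region begins (mova/mov r15 in the asm) --- */
    	tmp = v->counter;	/* load old value  */
    	tmp += i;		/* add             */
    	v->counter = tmp;	/* store new value */
    	/* --- restartable region ends (r15 restored) --- */
    }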
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index e12570b9339d..c043ef003028 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,7 +17,9 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-#ifdef CONFIG_CPU_SH4A
+#if defined(CONFIG_GUSA_RB)
+#include <asm/atomic-grb.h>
+#elif defined(CONFIG_CPU_SH4A)
 #include <asm/atomic-llsc.h>
 #else
 #include <asm/atomic-irq.h>
@@ -44,6 +46,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
+#ifndef CONFIG_GUSA_RB
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -58,8 +61,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
@@ -73,6 +74,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 	return ret != u;
 }
+#endif
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing on SH */
diff --git a/include/asm-sh/auxvec.h b/include/asm-sh/auxvec.h
index 1b6916e63e90..a6b9d4f4859e 100644
--- a/include/asm-sh/auxvec.h
+++ b/include/asm-sh/auxvec.h
@@ -6,6 +6,12 @@
  * for more of them.
  */
 
+/*
+ * This entry gives some information about the FPU initialization
+ * performed by the kernel.
+ */
+#define AT_FPUCW		18	/* Used FPU control word.  */
+
 #ifdef CONFIG_VSYSCALL
 /*
  * Only define this in the vsyscall case, the entry point to
@@ -15,4 +21,16 @@
 #define AT_SYSINFO_EHDR		33
 #endif
 
+/*
+ * More complete cache descriptions than AT_[DIU]CACHEBSIZE.  If the
+ * value is -1, then the cache doesn't exist.  Otherwise:
+ *
+ *    bit 0-3:	  Cache set-associativity; 0 means fully associative.
+ *    bit 4-7:	  Log2 of cacheline size.
+ *    bit 8-31:	  Size of the entire cache >> 8.
+ */
+#define AT_L1I_CACHESHAPE	34
+#define AT_L1D_CACHESHAPE	35
+#define AT_L2_CACHESHAPE	36
+
 #endif /* __ASM_SH_AUXVEC_H */
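The cache-shape encoding documented above packs associativity, line size and total size into one word, with -1 meaning the cache does not exist. A hypothetical decoder, just to make the bit layout concrete (names are not from this patch; callers should check for -1 first):

    struct cacheshape {
    	unsigned int ways;	/* bits 0-3, 0 = fully associative      */
    	unsigned int linesz;	/* bytes, from the log2 in bits 4-7     */
    	unsigned int size;	/* bytes, stored value is size >> 8     */
    };

    static void decode_cacheshape(long v, struct cacheshape *cs)
    {
    	cs->ways   = v & 0xf;
    	cs->linesz = 1u << ((v >> 4) & 0xf);
    	cs->size   = ((unsigned long)v >> 8) << 8;
    }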
diff --git a/include/asm-sh/bitops-grb.h b/include/asm-sh/bitops-grb.h
new file mode 100644
index 000000000000..a5907b94395b
--- /dev/null
+++ b/include/asm-sh/bitops-grb.h
@@ -0,0 +1,169 @@
1#ifndef __ASM_SH_BITOPS_GRB_H
2#define __ASM_SH_BITOPS_GRB_H
3
4static inline void set_bit(int nr, volatile void * addr)
5{
6 int mask;
7 volatile unsigned int *a = addr;
8 unsigned long tmp;
9
10 a += nr >> 5;
11 mask = 1 << (nr & 0x1f);
12
13 __asm__ __volatile__ (
14 " .align 2 \n\t"
15 " mova 1f, r0 \n\t" /* r0 = end point */
16 " mov r15, r1 \n\t" /* r1 = saved sp */
17 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
18 " mov.l @%1, %0 \n\t" /* load old value */
19 " or %2, %0 \n\t" /* or */
20 " mov.l %0, @%1 \n\t" /* store new value */
21 "1: mov r1, r15 \n\t" /* LOGOUT */
22 : "=&r" (tmp),
23 "+r" (a)
24 : "r" (mask)
25 : "memory" , "r0", "r1");
26}
27
28static inline void clear_bit(int nr, volatile void * addr)
29{
30 int mask;
31 volatile unsigned int *a = addr;
32 unsigned long tmp;
33
34 a += nr >> 5;
35 mask = ~(1 << (nr & 0x1f));
36 __asm__ __volatile__ (
37 " .align 2 \n\t"
38 " mova 1f, r0 \n\t" /* r0 = end point */
39 " mov r15, r1 \n\t" /* r1 = saved sp */
40 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
41 " mov.l @%1, %0 \n\t" /* load old value */
42 " and %2, %0 \n\t" /* and */
43 " mov.l %0, @%1 \n\t" /* store new value */
44 "1: mov r1, r15 \n\t" /* LOGOUT */
45 : "=&r" (tmp),
46 "+r" (a)
47 : "r" (mask)
48 : "memory" , "r0", "r1");
49}
50
51static inline void change_bit(int nr, volatile void * addr)
52{
53 int mask;
54 volatile unsigned int *a = addr;
55 unsigned long tmp;
56
57 a += nr >> 5;
58 mask = 1 << (nr & 0x1f);
59 __asm__ __volatile__ (
60 " .align 2 \n\t"
61 " mova 1f, r0 \n\t" /* r0 = end point */
62 " mov r15, r1 \n\t" /* r1 = saved sp */
63 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
64 " mov.l @%1, %0 \n\t" /* load old value */
65 " xor %2, %0 \n\t" /* xor */
66 " mov.l %0, @%1 \n\t" /* store new value */
67 "1: mov r1, r15 \n\t" /* LOGOUT */
68 : "=&r" (tmp),
69 "+r" (a)
70 : "r" (mask)
71 : "memory" , "r0", "r1");
72}
73
74static inline int test_and_set_bit(int nr, volatile void * addr)
75{
76 int mask, retval;
77 volatile unsigned int *a = addr;
78 unsigned long tmp;
79
80 a += nr >> 5;
81 mask = 1 << (nr & 0x1f);
82
83 __asm__ __volatile__ (
84 " .align 2 \n\t"
85 " mova 1f, r0 \n\t" /* r0 = end point */
86 " mov r15, r1 \n\t" /* r1 = saved sp */
87 " mov #-14, r15 \n\t" /* LOGIN: r15 = size */
88 " mov.l @%2, %0 \n\t" /* load old value */
89 " mov %0, %1 \n\t"
90 " tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
91 " mov #-1, %1 \n\t" /* retvat = -1 */
92 " negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
93 " or %3, %0 \n\t"
94 " mov.l %0, @%2 \n\t" /* store new value */
95 "1: mov r1, r15 \n\t" /* LOGOUT */
96 : "=&r" (tmp),
97 "=&r" (retval),
98 "+r" (a)
99 : "r" (mask)
100 : "memory" , "r0", "r1" ,"t");
101
102 return retval;
103}
104
105static inline int test_and_clear_bit(int nr, volatile void * addr)
106{
107 int mask, retval,not_mask;
108 volatile unsigned int *a = addr;
109 unsigned long tmp;
110
111 a += nr >> 5;
112 mask = 1 << (nr & 0x1f);
113
114 not_mask = ~mask;
115
116 __asm__ __volatile__ (
117 " .align 2 \n\t"
118 " mova 1f, r0 \n\t" /* r0 = end point */
119 " mov r15, r1 \n\t" /* r1 = saved sp */
120 " mov #-14, r15 \n\t" /* LOGIN */
121 " mov.l @%2, %0 \n\t" /* load old value */
122 " mov %0, %1 \n\t" /* %1 = *a */
123 " tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
124 " mov #-1, %1 \n\t" /* retvat = -1 */
125 " negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
126 " and %4, %0 \n\t"
127 " mov.l %0, @%2 \n\t" /* store new value */
128 "1: mov r1, r15 \n\t" /* LOGOUT */
129 : "=&r" (tmp),
130 "=&r" (retval),
131 "+r" (a)
132 : "r" (mask),
133 "r" (not_mask)
134 : "memory" , "r0", "r1", "t");
135
136 return retval;
137}
138
139static inline int test_and_change_bit(int nr, volatile void * addr)
140{
141 int mask, retval;
142 volatile unsigned int *a = addr;
143 unsigned long tmp;
144
145 a += nr >> 5;
146 mask = 1 << (nr & 0x1f);
147
148 __asm__ __volatile__ (
149 " .align 2 \n\t"
150 " mova 1f, r0 \n\t" /* r0 = end point */
151 " mov r15, r1 \n\t" /* r1 = saved sp */
152 " mov #-14, r15 \n\t" /* LOGIN */
153 " mov.l @%2, %0 \n\t" /* load old value */
154 " mov %0, %1 \n\t" /* %1 = *a */
155 " tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
156 " mov #-1, %1 \n\t" /* retvat = -1 */
157 " negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
158 " xor %3, %0 \n\t"
159 " mov.l %0, @%2 \n\t" /* store new value */
160 "1: mov r1, r15 \n\t" /* LOGOUT */
161 : "=&r" (tmp),
162 "=&r" (retval),
163 "+r" (a)
164 : "r" (mask)
165 : "memory" , "r0", "r1", "t");
166
167 return retval;
168}
169#endif /* __ASM_SH_BITOPS_GRB_H */
diff --git a/include/asm-sh/bitops-irq.h b/include/asm-sh/bitops-irq.h
new file mode 100644
index 000000000000..653a12750584
--- /dev/null
+++ b/include/asm-sh/bitops-irq.h
@@ -0,0 +1,91 @@
1#ifndef __ASM_SH_BITOPS_IRQ_H
2#define __ASM_SH_BITOPS_IRQ_H
3
4static inline void set_bit(int nr, volatile void *addr)
5{
6 int mask;
7 volatile unsigned int *a = addr;
8 unsigned long flags;
9
10 a += nr >> 5;
11 mask = 1 << (nr & 0x1f);
12 local_irq_save(flags);
13 *a |= mask;
14 local_irq_restore(flags);
15}
16
17static inline void clear_bit(int nr, volatile void *addr)
18{
19 int mask;
20 volatile unsigned int *a = addr;
21 unsigned long flags;
22
23 a += nr >> 5;
24 mask = 1 << (nr & 0x1f);
25 local_irq_save(flags);
26 *a &= ~mask;
27 local_irq_restore(flags);
28}
29
30static inline void change_bit(int nr, volatile void *addr)
31{
32 int mask;
33 volatile unsigned int *a = addr;
34 unsigned long flags;
35
36 a += nr >> 5;
37 mask = 1 << (nr & 0x1f);
38 local_irq_save(flags);
39 *a ^= mask;
40 local_irq_restore(flags);
41}
42
43static inline int test_and_set_bit(int nr, volatile void *addr)
44{
45 int mask, retval;
46 volatile unsigned int *a = addr;
47 unsigned long flags;
48
49 a += nr >> 5;
50 mask = 1 << (nr & 0x1f);
51 local_irq_save(flags);
52 retval = (mask & *a) != 0;
53 *a |= mask;
54 local_irq_restore(flags);
55
56 return retval;
57}
58
59static inline int test_and_clear_bit(int nr, volatile void *addr)
60{
61 int mask, retval;
62 volatile unsigned int *a = addr;
63 unsigned long flags;
64
65 a += nr >> 5;
66 mask = 1 << (nr & 0x1f);
67 local_irq_save(flags);
68 retval = (mask & *a) != 0;
69 *a &= ~mask;
70 local_irq_restore(flags);
71
72 return retval;
73}
74
75static inline int test_and_change_bit(int nr, volatile void *addr)
76{
77 int mask, retval;
78 volatile unsigned int *a = addr;
79 unsigned long flags;
80
81 a += nr >> 5;
82 mask = 1 << (nr & 0x1f);
83 local_irq_save(flags);
84 retval = (mask & *a) != 0;
85 *a ^= mask;
86 local_irq_restore(flags);
87
88 return retval;
89}
90
91#endif /* __ASM_SH_BITOPS_IRQ_H */
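Both the gUSA and IRQ-masking flavours keep the standard Linux bitops contract: the plain forms return void, while the test_and_* forms return the previous value of the bit. A typical (illustrative) caller claiming a slot exactly once; the bitmap name is made up:

    static unsigned long busy_map;		/* hypothetical driver state */

    static int claim_slot(int nr)
    {
    	if (test_and_set_bit(nr, &busy_map))
    		return -EBUSY;		/* bit was already set by someone else */
    	return 0;			/* we flipped it 0 -> 1; slot is ours  */
    }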
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index df805f20b267..b6ba5a60dec2 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -11,100 +11,22 @@
 /* For __swab32 */
 #include <asm/byteorder.h>
 
-static inline void set_bit(int nr, volatile void * addr)
-{
-	int	mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
+#ifdef CONFIG_GUSA_RB
+#include <asm/bitops-grb.h>
+#else
+#include <asm/bitops-irq.h>
+#endif
 
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a |= mask;
-	local_irq_restore(flags);
-}
 
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
-static inline void clear_bit(int nr, volatile void * addr)
-{
-	int	mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a &= ~mask;
-	local_irq_restore(flags);
-}
-
-static inline void change_bit(int nr, volatile void * addr)
-{
-	int	mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a ^= mask;
-	local_irq_restore(flags);
-}
-
-static inline int test_and_set_bit(int nr, volatile void * addr)
-{
-	int	mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline int test_and_clear_bit(int nr, volatile void * addr)
-{
-	int	mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline int test_and_change_bit(int nr, volatile void * addr)
-{
-	int	mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
 
 #include <asm-generic/bitops/non-atomic.h>
 
+#ifdef CONFIG_SUPERH32
 static inline unsigned long ffz(unsigned long word)
 {
 	unsigned long result;
@@ -138,6 +60,31 @@ static inline unsigned long __ffs(unsigned long word)
 		: "t");
 	return result;
 }
+#else
+static inline unsigned long ffz(unsigned long word)
+{
+	unsigned long result, __d2, __d3;
+
+	__asm__("gettr	tr0, %2\n\t"
+		"pta	$+32, tr0\n\t"
+		"andi	%1, 1, %3\n\t"
+		"beq	%3, r63, tr0\n\t"
+		"pta	$+4, tr0\n"
+		"0:\n\t"
+		"shlri.l	%1, 1, %1\n\t"
+		"addi	%0, 1, %0\n\t"
+		"andi	%1, 1, %3\n\t"
+		"beqi	%3, 1, tr0\n"
+		"1:\n\t"
+		"ptabs	%2, tr0\n\t"
+		: "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
+		: "0" (0L), "1" (word));
+
+	return result;
+}
+
+#include <asm-generic/bitops/__ffs.h>
+#endif
 
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/ffs.h>
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h
index a78d482e8b2f..c01718040166 100644
--- a/include/asm-sh/bug.h
+++ b/include/asm-sh/bug.h
@@ -3,7 +3,7 @@
 
 #define TRAPA_BUG_OPCODE	0xc33e	/* trapa #0x3e */
 
-#ifdef CONFIG_BUG
+#ifdef CONFIG_GENERIC_BUG
 #define HAVE_ARCH_BUG
 #define HAVE_ARCH_WARN_ON
 
@@ -72,12 +72,7 @@ do { \
 	unlikely(__ret_warn_on);			\
 })
 
-struct pt_regs;
-
-/* arch/sh/kernel/traps.c */
-void handle_BUG(struct pt_regs *);
-
-#endif /* CONFIG_BUG */
+#endif /* CONFIG_GENERIC_BUG */
 
 #include <asm-generic/bug.h>
 
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index b66139ff73fc..def8128b8b78 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -25,7 +25,7 @@ static void __init check_bugs(void)
 	case CPU_SH7619:
 		*p++ = '2';
 		break;
-	case CPU_SH7206:
+	case CPU_SH7203 ... CPU_SH7263:
 		*p++ = '2';
 		*p++ = 'a';
 		break;
@@ -35,7 +35,7 @@ static void __init check_bugs(void)
 	case CPU_SH7750 ... CPU_SH4_501:
 		*p++ = '4';
 		break;
-	case CPU_SH7770 ... CPU_SHX3:
+	case CPU_SH7763 ... CPU_SHX3:
 		*p++ = '4';
 		*p++ = 'a';
 		break;
@@ -48,9 +48,16 @@ static void __init check_bugs(void)
 		*p++ = 's';
 		*p++ = 'p';
 		break;
-	default:
-		*p++ = '?';
-		*p++ = '!';
+	case CPU_SH5_101 ... CPU_SH5_103:
+		*p++ = '6';
+		*p++ = '4';
+		break;
+	case CPU_SH_NONE:
+		/*
+		 * Specifically use CPU_SH_NONE rather than default:,
+		 * so we're able to have the compiler whine about
+		 * unhandled enumerations.
+		 */
 		break;
 	}
 
diff --git a/include/asm-sh/byteorder.h b/include/asm-sh/byteorder.h
index bff2b1382e01..0eb9904b6545 100644
--- a/include/asm-sh/byteorder.h
+++ b/include/asm-sh/byteorder.h
@@ -3,40 +3,55 @@
 
 /*
  * Copyright (C) 1999  Niibe Yutaka
+ * Copyright (C) 2000, 2001  Paolo Alberelli
  */
-
-#include <asm/types.h>
 #include <linux/compiler.h>
+#include <linux/types.h>
 
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
 {
-	__asm__("swap.b	%0, %0\n\t"
-		"swap.w	%0, %0\n\t"
-		"swap.b	%0, %0"
+	__asm__(
+#ifdef CONFIG_SUPERH32
+		"swap.b		%0, %0\n\t"
+		"swap.w		%0, %0\n\t"
+		"swap.b		%0, %0"
+#else
+		"byterev	%0, %0\n\t"
+		"shari		%0, 32, %0"
+#endif
 		: "=r" (x)
 		: "0" (x));
+
 	return x;
 }
 
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
+static inline __attribute_const__ __u16 ___arch__swab16(__u16 x)
 {
-	__asm__("swap.b	%0, %0"
+	__asm__(
+#ifdef CONFIG_SUPERH32
+		"swap.b		%0, %0"
+#else
+		"byterev	%0, %0\n\t"
+		"shari		%0, 32, %0"
+
+#endif
 		: "=r" (x)
 		: "0" (x));
+
 	return x;
 }
 
 static inline __u64 ___arch__swab64(__u64 val)
 {
 	union {
 		struct { __u32 a,b; } s;
 		__u64 u;
 	} v, w;
 	v.u = val;
 	w.s.b = ___arch__swab32(v.s.a);
 	w.s.a = ___arch__swab32(v.s.b);
 	return w.u;
 }
 
 #define __arch__swab64(x) ___arch__swab64(x)
 #define __arch__swab32(x) ___arch__swab32(x)
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index 01e5cf51ba9b..083419f47c65 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -12,11 +12,6 @@
 #include <linux/init.h>
 #include <asm/cpu/cache.h>
 
-#define SH_CACHE_VALID		1
-#define SH_CACHE_UPDATED	2
-#define SH_CACHE_COMBINED	4
-#define SH_CACHE_ASSOC		8
-
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index 4bc8357e8892..67496ab0ef04 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -1,215 +1,5 @@
1#ifndef __ASM_SH_CHECKSUM_H 1#ifdef CONFIG_SUPERH32
2#define __ASM_SH_CHECKSUM_H 2# include "checksum_32.h"
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
10 */
11
12#include <linux/in6.h>
13
14/*
15 * computes the checksum of a memory block at buff, length len,
16 * and adds in "sum" (32-bit)
17 *
18 * returns a 32-bit number suitable for feeding into itself
19 * or csum_tcpudp_magic
20 *
21 * this function must be called with even lengths, except
22 * for the last fragment, which may be odd
23 *
24 * it's best to have buff aligned on a 32-bit boundary
25 */
26asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
27
28/*
29 * the same as csum_partial, but copies from src while it
30 * checksums, and handles user-space pointer exceptions correctly, when needed.
31 *
32 * here even more important to align src and dst on a 32-bit (or even
33 * better 64-bit) boundary
34 */
35
36asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
37 int len, __wsum sum,
38 int *src_err_ptr, int *dst_err_ptr);
39
40/*
41 * Note: when you get a NULL pointer exception here this means someone
42 * passed in an incorrect kernel address to one of these functions.
43 *
44 * If you use these functions directly please don't forget the
45 * access_ok().
46 */
47static inline
48__wsum csum_partial_copy_nocheck(const void *src, void *dst,
49 int len, __wsum sum)
50{
51 return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
52}
53
54static inline
55__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
56 int len, __wsum sum, int *err_ptr)
57{
58 return csum_partial_copy_generic((__force const void *)src, dst,
59 len, sum, err_ptr, NULL);
60}
61
62/*
63 * Fold a partial checksum
64 */
65
66static inline __sum16 csum_fold(__wsum sum)
67{
68 unsigned int __dummy;
69 __asm__("swap.w %0, %1\n\t"
70 "extu.w %0, %0\n\t"
71 "extu.w %1, %1\n\t"
72 "add %1, %0\n\t"
73 "swap.w %0, %1\n\t"
74 "add %1, %0\n\t"
75 "not %0, %0\n\t"
76 : "=r" (sum), "=&r" (__dummy)
77 : "0" (sum)
78 : "t");
79 return (__force __sum16)sum;
80}
81
82/*
83 * This is a version of ip_compute_csum() optimized for IP headers,
84 * which always checksum on 4 octet boundaries.
85 *
86 * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
87 * for linux by * Arnt Gulbrandsen.
88 */
89static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
90{
91 unsigned int sum, __dummy0, __dummy1;
92
93 __asm__ __volatile__(
94 "mov.l @%1+, %0\n\t"
95 "mov.l @%1+, %3\n\t"
96 "add #-2, %2\n\t"
97 "clrt\n\t"
98 "1:\t"
99 "addc %3, %0\n\t"
100 "movt %4\n\t"
101 "mov.l @%1+, %3\n\t"
102 "dt %2\n\t"
103 "bf/s 1b\n\t"
104 " cmp/eq #1, %4\n\t"
105 "addc %3, %0\n\t"
106 "addc %2, %0" /* Here %2 is 0, add carry-bit */
107 /* Since the input registers which are loaded with iph and ihl
108 are modified, we must also specify them as outputs, or gcc
109 will assume they contain their original values. */
110 : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
111 : "1" (iph), "2" (ihl)
112 : "t");
113
114 return csum_fold(sum);
115}
116
117static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
118 unsigned short len,
119 unsigned short proto,
120 __wsum sum)
121{
122#ifdef __LITTLE_ENDIAN__
123 unsigned long len_proto = (proto + len) << 8;
124#else 3#else
125 unsigned long len_proto = proto + len; 4# include "checksum_64.h"
126#endif 5#endif
127 __asm__("clrt\n\t"
128 "addc %0, %1\n\t"
129 "addc %2, %1\n\t"
130 "addc %3, %1\n\t"
131 "movt %0\n\t"
132 "add %1, %0"
133 : "=r" (sum), "=r" (len_proto)
134 : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
135 : "t");
136
137 return sum;
138}
139
140/*
141 * computes the checksum of the TCP/UDP pseudo-header
142 * returns a 16-bit checksum, already complemented
143 */
144static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
145 unsigned short len,
146 unsigned short proto,
147 __wsum sum)
148{
149 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
150}
151
152/*
153 * this routine is used for miscellaneous IP-like checksums, mainly
154 * in icmp.c
155 */
156static inline __sum16 ip_compute_csum(const void *buff, int len)
157{
158 return csum_fold(csum_partial(buff, len, 0));
159}
160
161#define _HAVE_ARCH_IPV6_CSUM
162static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
163 const struct in6_addr *daddr,
164 __u32 len, unsigned short proto,
165 __wsum sum)
166{
167 unsigned int __dummy;
168 __asm__("clrt\n\t"
169 "mov.l @(0,%2), %1\n\t"
170 "addc %1, %0\n\t"
171 "mov.l @(4,%2), %1\n\t"
172 "addc %1, %0\n\t"
173 "mov.l @(8,%2), %1\n\t"
174 "addc %1, %0\n\t"
175 "mov.l @(12,%2), %1\n\t"
176 "addc %1, %0\n\t"
177 "mov.l @(0,%3), %1\n\t"
178 "addc %1, %0\n\t"
179 "mov.l @(4,%3), %1\n\t"
180 "addc %1, %0\n\t"
181 "mov.l @(8,%3), %1\n\t"
182 "addc %1, %0\n\t"
183 "mov.l @(12,%3), %1\n\t"
184 "addc %1, %0\n\t"
185 "addc %4, %0\n\t"
186 "addc %5, %0\n\t"
187 "movt %1\n\t"
188 "add %1, %0\n"
189 : "=r" (sum), "=&r" (__dummy)
190 : "r" (saddr), "r" (daddr),
191 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
192 : "t");
193
194 return csum_fold(sum);
195}
196
197/*
198 * Copy and checksum to user
199 */
200#define HAVE_CSUM_COPY_USER
201static inline __wsum csum_and_copy_to_user(const void *src,
202 void __user *dst,
203 int len, __wsum sum,
204 int *err_ptr)
205{
206 if (access_ok(VERIFY_WRITE, dst, len))
207 return csum_partial_copy_generic((__force const void *)src,
208 dst, len, sum, NULL, err_ptr);
209
210 if (len)
211 *err_ptr = -EFAULT;
212
213 return (__force __wsum)-1; /* invalid checksum */
214}
215#endif /* __ASM_SH_CHECKSUM_H */
diff --git a/include/asm-sh/checksum_32.h b/include/asm-sh/checksum_32.h
new file mode 100644
index 000000000000..4bc8357e8892
--- /dev/null
+++ b/include/asm-sh/checksum_32.h
@@ -0,0 +1,215 @@
1#ifndef __ASM_SH_CHECKSUM_H
2#define __ASM_SH_CHECKSUM_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
10 */
11
12#include <linux/in6.h>
13
14/*
15 * computes the checksum of a memory block at buff, length len,
16 * and adds in "sum" (32-bit)
17 *
18 * returns a 32-bit number suitable for feeding into itself
19 * or csum_tcpudp_magic
20 *
21 * this function must be called with even lengths, except
22 * for the last fragment, which may be odd
23 *
24 * it's best to have buff aligned on a 32-bit boundary
25 */
26asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
27
28/*
29 * the same as csum_partial, but copies from src while it
30 * checksums, and handles user-space pointer exceptions correctly, when needed.
31 *
32 * here even more important to align src and dst on a 32-bit (or even
33 * better 64-bit) boundary
34 */
35
36asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
37 int len, __wsum sum,
38 int *src_err_ptr, int *dst_err_ptr);
39
40/*
41 * Note: when you get a NULL pointer exception here this means someone
42 * passed in an incorrect kernel address to one of these functions.
43 *
44 * If you use these functions directly please don't forget the
45 * access_ok().
46 */
47static inline
48__wsum csum_partial_copy_nocheck(const void *src, void *dst,
49 int len, __wsum sum)
50{
51 return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
52}
53
54static inline
55__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
56 int len, __wsum sum, int *err_ptr)
57{
58 return csum_partial_copy_generic((__force const void *)src, dst,
59 len, sum, err_ptr, NULL);
60}
61
62/*
63 * Fold a partial checksum
64 */
65
66static inline __sum16 csum_fold(__wsum sum)
67{
68 unsigned int __dummy;
69 __asm__("swap.w %0, %1\n\t"
70 "extu.w %0, %0\n\t"
71 "extu.w %1, %1\n\t"
72 "add %1, %0\n\t"
73 "swap.w %0, %1\n\t"
74 "add %1, %0\n\t"
75 "not %0, %0\n\t"
76 : "=r" (sum), "=&r" (__dummy)
77 : "0" (sum)
78 : "t");
79 return (__force __sum16)sum;
80}
81
82/*
83 * This is a version of ip_compute_csum() optimized for IP headers,
84 * which always checksum on 4 octet boundaries.
85 *
86 * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
87 * for linux by * Arnt Gulbrandsen.
88 */
89static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
90{
91 unsigned int sum, __dummy0, __dummy1;
92
93 __asm__ __volatile__(
94 "mov.l @%1+, %0\n\t"
95 "mov.l @%1+, %3\n\t"
96 "add #-2, %2\n\t"
97 "clrt\n\t"
98 "1:\t"
99 "addc %3, %0\n\t"
100 "movt %4\n\t"
101 "mov.l @%1+, %3\n\t"
102 "dt %2\n\t"
103 "bf/s 1b\n\t"
104 " cmp/eq #1, %4\n\t"
105 "addc %3, %0\n\t"
106 "addc %2, %0" /* Here %2 is 0, add carry-bit */
107 /* Since the input registers which are loaded with iph and ihl
108 are modified, we must also specify them as outputs, or gcc
109 will assume they contain their original values. */
110 : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
111 : "1" (iph), "2" (ihl)
112 : "t");
113
114 return csum_fold(sum);
115}
116
117static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
118 unsigned short len,
119 unsigned short proto,
120 __wsum sum)
121{
122#ifdef __LITTLE_ENDIAN__
123 unsigned long len_proto = (proto + len) << 8;
124#else
125 unsigned long len_proto = proto + len;
126#endif
127 __asm__("clrt\n\t"
128 "addc %0, %1\n\t"
129 "addc %2, %1\n\t"
130 "addc %3, %1\n\t"
131 "movt %0\n\t"
132 "add %1, %0"
133 : "=r" (sum), "=r" (len_proto)
134 : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
135 : "t");
136
137 return sum;
138}
139
140/*
141 * computes the checksum of the TCP/UDP pseudo-header
142 * returns a 16-bit checksum, already complemented
143 */
144static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
145 unsigned short len,
146 unsigned short proto,
147 __wsum sum)
148{
149 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
150}
151
152/*
153 * this routine is used for miscellaneous IP-like checksums, mainly
154 * in icmp.c
155 */
156static inline __sum16 ip_compute_csum(const void *buff, int len)
157{
158 return csum_fold(csum_partial(buff, len, 0));
159}
160
161#define _HAVE_ARCH_IPV6_CSUM
162static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
163 const struct in6_addr *daddr,
164 __u32 len, unsigned short proto,
165 __wsum sum)
166{
167 unsigned int __dummy;
168 __asm__("clrt\n\t"
169 "mov.l @(0,%2), %1\n\t"
170 "addc %1, %0\n\t"
171 "mov.l @(4,%2), %1\n\t"
172 "addc %1, %0\n\t"
173 "mov.l @(8,%2), %1\n\t"
174 "addc %1, %0\n\t"
175 "mov.l @(12,%2), %1\n\t"
176 "addc %1, %0\n\t"
177 "mov.l @(0,%3), %1\n\t"
178 "addc %1, %0\n\t"
179 "mov.l @(4,%3), %1\n\t"
180 "addc %1, %0\n\t"
181 "mov.l @(8,%3), %1\n\t"
182 "addc %1, %0\n\t"
183 "mov.l @(12,%3), %1\n\t"
184 "addc %1, %0\n\t"
185 "addc %4, %0\n\t"
186 "addc %5, %0\n\t"
187 "movt %1\n\t"
188 "add %1, %0\n"
189 : "=r" (sum), "=&r" (__dummy)
190 : "r" (saddr), "r" (daddr),
191 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
192 : "t");
193
194 return csum_fold(sum);
195}
196
197/*
198 * Copy and checksum to user
199 */
200#define HAVE_CSUM_COPY_USER
201static inline __wsum csum_and_copy_to_user(const void *src,
202 void __user *dst,
203 int len, __wsum sum,
204 int *err_ptr)
205{
206 if (access_ok(VERIFY_WRITE, dst, len))
207 return csum_partial_copy_generic((__force const void *)src,
208 dst, len, sum, NULL, err_ptr);
209
210 if (len)
211 *err_ptr = -EFAULT;
212
213 return (__force __wsum)-1; /* invalid checksum */
214}
215#endif /* __ASM_SH_CHECKSUM_H */
diff --git a/include/asm-sh/checksum_64.h b/include/asm-sh/checksum_64.h
new file mode 100644
index 000000000000..9c62a031a8f5
--- /dev/null
+++ b/include/asm-sh/checksum_64.h
@@ -0,0 +1,78 @@
1#ifndef __ASM_SH_CHECKSUM_64_H
2#define __ASM_SH_CHECKSUM_64_H
3
4/*
5 * include/asm-sh/checksum_64.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14/*
15 * computes the checksum of a memory block at buff, length len,
16 * and adds in "sum" (32-bit)
17 *
18 * returns a 32-bit number suitable for feeding into itself
19 * or csum_tcpudp_magic
20 *
21 * this function must be called with even lengths, except
22 * for the last fragment, which may be odd
23 *
24 * it's best to have buff aligned on a 32-bit boundary
25 */
26asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
27
28/*
29 * Note: when you get a NULL pointer exception here this means someone
30 * passed in an incorrect kernel address to one of these functions.
31 *
32 * If you use these functions directly please don't forget the
33 * access_ok().
34 */
35
36
37__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
38 __wsum sum);
39
40__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
41 int len, __wsum sum, int *err_ptr);
42
43static inline __sum16 csum_fold(__wsum csum)
44{
45 u32 sum = (__force u32)csum;
46 sum = (sum & 0xffff) + (sum >> 16);
47 sum = (sum & 0xffff) + (sum >> 16);
48 return (__force __sum16)~sum;
49}
50
51__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
52
53__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
54 unsigned short len, unsigned short proto,
55 __wsum sum);
56
57/*
58 * computes the checksum of the TCP/UDP pseudo-header
59 * returns a 16-bit checksum, already complemented
60 */
61static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
62 unsigned short len,
63 unsigned short proto,
64 __wsum sum)
65{
66 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
67}
68
69/*
70 * this routine is used for miscellaneous IP-like checksums, mainly
71 * in icmp.c
72 */
73static inline __sum16 ip_compute_csum(const void *buff, int len)
74{
75 return csum_fold(csum_partial(buff, len, 0));
76}
77
78#endif /* __ASM_SH_CHECKSUM_64_H */
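The generic csum_fold() above is the usual ones'-complement reduction: fold the high half into the low half twice, then invert. As a worked example (plain arithmetic, not from the patch), for sum = 0x0001fffd the first fold gives 0xfffd + 0x0001 = 0xfffe, the second fold leaves 0xfffe unchanged, and ~0xfffe truncated to 16 bits yields the final checksum 0x0001.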
diff --git a/include/asm-sh/cmpxchg-grb.h b/include/asm-sh/cmpxchg-grb.h
new file mode 100644
index 000000000000..e2681abe764f
--- /dev/null
+++ b/include/asm-sh/cmpxchg-grb.h
@@ -0,0 +1,70 @@
1#ifndef __ASM_SH_CMPXCHG_GRB_H
2#define __ASM_SH_CMPXCHG_GRB_H
3
4static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
5{
6 unsigned long retval;
7
8 __asm__ __volatile__ (
9 " .align 2 \n\t"
10 " mova 1f, r0 \n\t" /* r0 = end point */
11 " nop \n\t"
12 " mov r15, r1 \n\t" /* r1 = saved sp */
13 " mov #-4, r15 \n\t" /* LOGIN */
14 " mov.l @%1, %0 \n\t" /* load old value */
15 " mov.l %2, @%1 \n\t" /* store new value */
16 "1: mov r1, r15 \n\t" /* LOGOUT */
17 : "=&r" (retval),
18 "+r" (m)
19 : "r" (val)
20 : "memory", "r0", "r1");
21
22 return retval;
23}
24
25static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
26{
27 unsigned long retval;
28
29 __asm__ __volatile__ (
30 " .align 2 \n\t"
31 " mova 1f, r0 \n\t" /* r0 = end point */
32 " mov r15, r1 \n\t" /* r1 = saved sp */
33 " mov #-6, r15 \n\t" /* LOGIN */
34 " mov.b @%1, %0 \n\t" /* load old value */
35 " extu.b %0, %0 \n\t" /* extend as unsigned */
36 " mov.b %2, @%1 \n\t" /* store new value */
37 "1: mov r1, r15 \n\t" /* LOGOUT */
38 : "=&r" (retval),
39 "+r" (m)
40 : "r" (val)
41 : "memory" , "r0", "r1");
42
43 return retval;
44}
45
46static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
47 unsigned long new)
48{
49 unsigned long retval;
50
51 __asm__ __volatile__ (
52 " .align 2 \n\t"
53 " mova 1f, r0 \n\t" /* r0 = end point */
54 " nop \n\t"
55 " mov r15, r1 \n\t" /* r1 = saved sp */
56 " mov #-8, r15 \n\t" /* LOGIN */
57 " mov.l @%1, %0 \n\t" /* load old value */
58 " cmp/eq %0, %2 \n\t"
59 " bf 1f \n\t" /* if not equal */
60 " mov.l %2, @%1 \n\t" /* store new value */
61 "1: mov r1, r15 \n\t" /* LOGOUT */
62 : "=&r" (retval),
63 "+r" (m)
64 : "r" (new)
65 : "memory" , "r0", "r1", "t");
66
67 return retval;
68}
69
70#endif /* __ASM_SH_CMPXCHG_GRB_H */
diff --git a/include/asm-sh/cmpxchg-irq.h b/include/asm-sh/cmpxchg-irq.h
new file mode 100644
index 000000000000..43049ec0554b
--- /dev/null
+++ b/include/asm-sh/cmpxchg-irq.h
@@ -0,0 +1,40 @@
1#ifndef __ASM_SH_CMPXCHG_IRQ_H
2#define __ASM_SH_CMPXCHG_IRQ_H
3
4static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
5{
6 unsigned long flags, retval;
7
8 local_irq_save(flags);
9 retval = *m;
10 *m = val;
11 local_irq_restore(flags);
12 return retval;
13}
14
15static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
16{
17 unsigned long flags, retval;
18
19 local_irq_save(flags);
20 retval = *m;
21 *m = val & 0xff;
22 local_irq_restore(flags);
23 return retval;
24}
25
26static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
27 unsigned long new)
28{
29 __u32 retval;
30 unsigned long flags;
31
32 local_irq_save(flags);
33 retval = *m;
34 if (retval == old)
35 *m = new;
36 local_irq_restore(flags); /* implies memory barrier */
37 return retval;
38}
39
40#endif /* __ASM_SH_CMPXCHG_IRQ_H */
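Note that __cmpxchg_u32() in both the IRQ-masking and gUSA variants returns the value that was found in memory, so callers detect success by comparing the return value against the expected old value and retrying otherwise. An illustrative retry loop (not part of the patch):

    static void add_with_cmpxchg(volatile int *counter, int delta)
    {
    	unsigned long old, seen;

    	do {
    		old  = *counter;
    		seen = __cmpxchg_u32(counter, old, old + delta);
    	} while (seen != old);	/* lost a race with another updater: retry */
    }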
diff --git a/include/asm-sh/cpu-sh2/addrspace.h b/include/asm-sh/cpu-sh2/addrspace.h
index 8706c903c5a0..2b9ab93efa4e 100644
--- a/include/asm-sh/cpu-sh2/addrspace.h
+++ b/include/asm-sh/cpu-sh2/addrspace.h
@@ -10,7 +10,10 @@
 #ifndef __ASM_CPU_SH2_ADDRSPACE_H
 #define __ASM_CPU_SH2_ADDRSPACE_H
 
-/* Should fill here */
+#define P0SEG		0x00000000
+#define P1SEG		0x80000000
+#define P2SEG		0xa0000000
+#define P3SEG		0xc0000000
+#define P4SEG		0xe0000000
 
 #endif /* __ASM_CPU_SH2_ADDRSPACE_H */
-
diff --git a/include/asm-sh/cpu-sh2/cache.h b/include/asm-sh/cpu-sh2/cache.h
index f02ba7a672b2..4e0b16500686 100644
--- a/include/asm-sh/cpu-sh2/cache.h
+++ b/include/asm-sh/cpu-sh2/cache.h
@@ -12,9 +12,13 @@
 
 #define L1_CACHE_SHIFT	4
 
+#define SH_CACHE_VALID		1
+#define SH_CACHE_UPDATED	2
+#define SH_CACHE_COMBINED	4
+#define SH_CACHE_ASSOC		8
+
 #if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR1		0xffffffec
-#define CCR		CCR1
+#define CCR		0xffffffec
 
 #define CCR_CACHE_CE	0x01	/* Cache enable */
 #define CCR_CACHE_WT	0x06	/* CCR[bit1=1,bit2=1] */
diff --git a/include/asm-sh/cpu-sh2/rtc.h b/include/asm-sh/cpu-sh2/rtc.h
new file mode 100644
index 000000000000..39e2d6e94782
--- /dev/null
+++ b/include/asm-sh/cpu-sh2/rtc.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_SH_CPU_SH2_RTC_H
+#define __ASM_SH_CPU_SH2_RTC_H
+
+#define rtc_reg_size		sizeof(u16)
+#define RTC_BIT_INVERTED	0
+#define RTC_DEF_CAPABILITIES	0UL
+
+#endif /* __ASM_SH_CPU_SH2_RTC_H */
diff --git a/include/asm-sh/cpu-sh2a/addrspace.h b/include/asm-sh/cpu-sh2a/addrspace.h
index 3d2e9aa21522..795ddd6856a3 100644
--- a/include/asm-sh/cpu-sh2a/addrspace.h
+++ b/include/asm-sh/cpu-sh2a/addrspace.h
@@ -1 +1,10 @@
-#include <asm/cpu-sh2/addrspace.h>
+#ifndef __ASM_SH_CPU_SH2A_ADDRSPACE_H
+#define __ASM_SH_CPU_SH2A_ADDRSPACE_H
+
+#define P0SEG		0x00000000
+#define P1SEG		0x00000000
+#define P2SEG		0x20000000
+#define P3SEG		0x00000000
+#define P4SEG		0x80000000
+
+#endif /* __ASM_SH_CPU_SH2A_ADDRSPACE_H */
diff --git a/include/asm-sh/cpu-sh2a/cache.h b/include/asm-sh/cpu-sh2a/cache.h
index 3e4b9e480982..afe228b3f493 100644
--- a/include/asm-sh/cpu-sh2a/cache.h
+++ b/include/asm-sh/cpu-sh2a/cache.h
@@ -12,11 +12,13 @@
 
 #define L1_CACHE_SHIFT	4
 
-#define CCR1		0xfffc1000
-#define CCR2		0xfffc1004
+#define SH_CACHE_VALID		1
+#define SH_CACHE_UPDATED	2
+#define SH_CACHE_COMBINED	4
+#define SH_CACHE_ASSOC		8
 
-/* CCR1 behaves more like the traditional CCR */
-#define CCR		CCR1
+#define CCR		0xfffc1000 /* CCR1 */
+#define CCR2		0xfffc1004
 
 /*
  * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
@@ -36,4 +38,3 @@
 #define CCR_CACHE_INVALIDATE	(CCR_CACHE_OCI | CCR_CACHE_ICI)
 
 #endif /* __ASM_CPU_SH2A_CACHE_H */
-
diff --git a/include/asm-sh/cpu-sh2a/freq.h b/include/asm-sh/cpu-sh2a/freq.h
index e518fff6d10f..830fd43b6cdc 100644
--- a/include/asm-sh/cpu-sh2a/freq.h
+++ b/include/asm-sh/cpu-sh2a/freq.h
@@ -10,9 +10,7 @@
 #ifndef __ASM_CPU_SH2A_FREQ_H
 #define __ASM_CPU_SH2A_FREQ_H
 
-#if defined(CONFIG_CPU_SUBTYPE_SH7206)
 #define FREQCR	0xfffe0010
-#endif
 
 #endif /* __ASM_CPU_SH2A_FREQ_H */
 
diff --git a/include/asm-sh/cpu-sh2a/rtc.h b/include/asm-sh/cpu-sh2a/rtc.h
new file mode 100644
index 000000000000..afb511e2bed7
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/rtc.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_SH_CPU_SH2A_RTC_H
+#define __ASM_SH_CPU_SH2A_RTC_H
+
+#define rtc_reg_size		sizeof(u16)
+#define RTC_BIT_INVERTED	0
+#define RTC_DEF_CAPABILITIES	RTC_CAP_4_DIGIT_YEAR
+
+#endif /* __ASM_SH_CPU_SH2A_RTC_H */
diff --git a/include/asm-sh/cpu-sh3/addrspace.h b/include/asm-sh/cpu-sh3/addrspace.h
index 872e9e1b548c..0f94726c7d62 100644
--- a/include/asm-sh/cpu-sh3/addrspace.h
+++ b/include/asm-sh/cpu-sh3/addrspace.h
@@ -10,7 +10,10 @@
 #ifndef __ASM_CPU_SH3_ADDRSPACE_H
 #define __ASM_CPU_SH3_ADDRSPACE_H
 
-/* Should fill here */
+#define P0SEG		0x00000000
+#define P1SEG		0x80000000
+#define P2SEG		0xa0000000
+#define P3SEG		0xc0000000
+#define P4SEG		0xe0000000
 
 #endif /* __ASM_CPU_SH3_ADDRSPACE_H */
-
diff --git a/include/asm-sh/cpu-sh3/cache.h b/include/asm-sh/cpu-sh3/cache.h
index 255016fc91f0..56bd838b7db4 100644
--- a/include/asm-sh/cpu-sh3/cache.h
+++ b/include/asm-sh/cpu-sh3/cache.h
@@ -12,6 +12,11 @@
 
 #define L1_CACHE_SHIFT	4
 
+#define SH_CACHE_VALID		1
+#define SH_CACHE_UPDATED	2
+#define SH_CACHE_COMBINED	4
+#define SH_CACHE_ASSOC		8
+
 #define CCR		0xffffffec	/* Address of Cache Control Register */
 
 #define CCR_CACHE_CE	0x01	/* Cache Enable */
@@ -28,7 +33,8 @@
 
 #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
     defined(CONFIG_CPU_SUBTYPE_SH7710) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7720)
+    defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7721)
 #define CCR3	0xa40000b4
 #define CCR_CACHE_16KB	0x00010000
 #define CCR_CACHE_32KB	0x00020000
diff --git a/include/asm-sh/cpu-sh3/dma.h b/include/asm-sh/cpu-sh3/dma.h
index 54bfece328c2..092ff9d872c3 100644
--- a/include/asm-sh/cpu-sh3/dma.h
+++ b/include/asm-sh/cpu-sh3/dma.h
@@ -2,7 +2,9 @@
 #define __ASM_CPU_SH3_DMA_H
 
 
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7709)
+#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7709)
 #define SH_DMAC_BASE	0xa4010020
 
 #define DMTE0_IRQ	48
diff --git a/include/asm-sh/cpu-sh3/freq.h b/include/asm-sh/cpu-sh3/freq.h
index 0a054b53b9de..53c62302b2e3 100644
--- a/include/asm-sh/cpu-sh3/freq.h
+++ b/include/asm-sh/cpu-sh3/freq.h
@@ -10,7 +10,12 @@
 #ifndef __ASM_CPU_SH3_FREQ_H
 #define __ASM_CPU_SH3_FREQ_H
 
+#ifdef CONFIG_CPU_SUBTYPE_SH7712
+#define FRQCR			0xA415FF80
+#else
 #define FRQCR			0xffffff80
+#endif
+
 #define MIN_DIVISOR_NR		0
 #define MAX_DIVISOR_NR		4
 
diff --git a/include/asm-sh/cpu-sh3/gpio.h b/include/asm-sh/cpu-sh3/gpio.h
index 48770c1c7bdf..4e53eb314b8f 100644
--- a/include/asm-sh/cpu-sh3/gpio.h
+++ b/include/asm-sh/cpu-sh3/gpio.h
@@ -12,7 +12,8 @@
 #ifndef _CPU_SH3_GPIO_H
 #define _CPU_SH3_GPIO_H
 
-#if defined(CONFIG_CPU_SUBTYPE_SH7720)
+#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7721)
 
 /* Control registers */
 #define PORT_PACR	0xA4050100UL
diff --git a/include/asm-sh/cpu-sh3/mmu_context.h b/include/asm-sh/cpu-sh3/mmu_context.h
index 16c2d63b7e39..ab09da73ce77 100644
--- a/include/asm-sh/cpu-sh3/mmu_context.h
+++ b/include/asm-sh/cpu-sh3/mmu_context.h
@@ -33,7 +33,8 @@
     defined(CONFIG_CPU_SUBTYPE_SH7709) || \
     defined(CONFIG_CPU_SUBTYPE_SH7710) || \
     defined(CONFIG_CPU_SUBTYPE_SH7712) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7720)
+    defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7721)
 #define INTEVT	0xa4000000	/* INTEVTE2(0xa4000000) */
 #else
 #define INTEVT	0xffffffd8
diff --git a/include/asm-sh/cpu-sh3/rtc.h b/include/asm-sh/cpu-sh3/rtc.h
new file mode 100644
index 000000000000..319404aaee37
--- /dev/null
+++ b/include/asm-sh/cpu-sh3/rtc.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_SH_CPU_SH3_RTC_H
+#define __ASM_SH_CPU_SH3_RTC_H
+
+#define rtc_reg_size		sizeof(u16)
+#define RTC_BIT_INVERTED	0	/* No bug on SH7708, SH7709A */
+#define RTC_DEF_CAPABILITIES	0UL
+
+#endif /* __ASM_SH_CPU_SH3_RTC_H */
diff --git a/include/asm-sh/cpu-sh3/timer.h b/include/asm-sh/cpu-sh3/timer.h
index 7b795ac5477c..793acf12aa08 100644
--- a/include/asm-sh/cpu-sh3/timer.h
+++ b/include/asm-sh/cpu-sh3/timer.h
@@ -23,12 +23,13 @@
  * ---------------------------------------------------------------------------
  */
 
-#if !defined(CONFIG_CPU_SUBTYPE_SH7720)
+#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && !defined(CONFIG_CPU_SUBTYPE_SH7721)
 #define TMU_TOCR	0xfffffe90	/* Byte access */
 #endif
 
 #if defined(CONFIG_CPU_SUBTYPE_SH7710) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7720)
+    defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7721)
 #define TMU_012_TSTR	0xa412fe92	/* Byte access */
 
 #define TMU0_TCOR	0xa412fe94	/* Long access */
@@ -57,7 +58,7 @@
57#define TMU2_TCOR 0xfffffeac /* Long access */ 58#define TMU2_TCOR 0xfffffeac /* Long access */
58#define TMU2_TCNT 0xfffffeb0 /* Long access */ 59#define TMU2_TCNT 0xfffffeb0 /* Long access */
59#define TMU2_TCR 0xfffffeb4 /* Word access */ 60#define TMU2_TCR 0xfffffeb4 /* Word access */
60#if !defined(CONFIG_CPU_SUBTYPE_SH7720) 61#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && !defined(CONFIG_CPU_SUBTYPE_SH7721)
61#define TMU2_TCPR2 0xfffffeb8 /* Long access */ 62#define TMU2_TCPR2 0xfffffeb8 /* Long access */
62#endif 63#endif
63#endif 64#endif
diff --git a/include/asm-sh/cpu-sh3/ubc.h b/include/asm-sh/cpu-sh3/ubc.h
index 18467c574534..4e6381d5ff7a 100644
--- a/include/asm-sh/cpu-sh3/ubc.h
+++ b/include/asm-sh/cpu-sh3/ubc.h
@@ -12,7 +12,8 @@
12#define __ASM_CPU_SH3_UBC_H 12#define __ASM_CPU_SH3_UBC_H
13 13
14#if defined(CONFIG_CPU_SUBTYPE_SH7710) || \ 14#if defined(CONFIG_CPU_SUBTYPE_SH7710) || \
15 defined(CONFIG_CPU_SUBTYPE_SH7720) 15 defined(CONFIG_CPU_SUBTYPE_SH7720) || \
16 defined(CONFIG_CPU_SUBTYPE_SH7721)
16#define UBC_BARA 0xa4ffffb0 17#define UBC_BARA 0xa4ffffb0
17#define UBC_BAMRA 0xa4ffffb4 18#define UBC_BAMRA 0xa4ffffb4
18#define UBC_BBRA 0xa4ffffb8 19#define UBC_BBRA 0xa4ffffb8
diff --git a/include/asm-sh/cpu-sh4/addrspace.h b/include/asm-sh/cpu-sh4/addrspace.h
index bb2e1b03060c..a3fa733c1c7d 100644
--- a/include/asm-sh/cpu-sh4/addrspace.h
+++ b/include/asm-sh/cpu-sh4/addrspace.h
@@ -10,6 +10,12 @@
10#ifndef __ASM_CPU_SH4_ADDRSPACE_H 10#ifndef __ASM_CPU_SH4_ADDRSPACE_H
11#define __ASM_CPU_SH4_ADDRSPACE_H 11#define __ASM_CPU_SH4_ADDRSPACE_H
12 12
13#define P0SEG 0x00000000
14#define P1SEG 0x80000000
15#define P2SEG 0xa0000000
16#define P3SEG 0xc0000000
17#define P4SEG 0xe0000000
18
13/* Detailed P4SEG */ 19/* Detailed P4SEG */
14#define P4SEG_STORE_QUE (P4SEG) 20#define P4SEG_STORE_QUE (P4SEG)
15#define P4SEG_IC_ADDR 0xf0000000 21#define P4SEG_IC_ADDR 0xf0000000
diff --git a/include/asm-sh/cpu-sh4/cache.h b/include/asm-sh/cpu-sh4/cache.h
index f92b20a0983d..1c61ebf5c8e3 100644
--- a/include/asm-sh/cpu-sh4/cache.h
+++ b/include/asm-sh/cpu-sh4/cache.h
@@ -12,6 +12,11 @@
12 12
13#define L1_CACHE_SHIFT 5 13#define L1_CACHE_SHIFT 5
14 14
15#define SH_CACHE_VALID 1
16#define SH_CACHE_UPDATED 2
17#define SH_CACHE_COMBINED 4
18#define SH_CACHE_ASSOC 8
19
15#define CCR 0xff00001c /* Address of Cache Control Register */ 20#define CCR 0xff00001c /* Address of Cache Control Register */
16#define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */ 21#define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */
17#define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/ 22#define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/
diff --git a/include/asm-sh/cpu-sh4/fpu.h b/include/asm-sh/cpu-sh4/fpu.h
new file mode 100644
index 000000000000..febef7342528
--- /dev/null
+++ b/include/asm-sh/cpu-sh4/fpu.h
@@ -0,0 +1,32 @@
1/*
2 * linux/arch/sh/kernel/cpu/sh4/sh4_fpu.h
3 *
4 * Copyright (C) 2006 STMicroelectronics Limited
5 * Author: Carl Shaw <carl.shaw@st.com>
6 *
7 * May be copied or modified under the terms of the GNU General Public
8 * License Version 2. See linux/COPYING for more information.
9 *
10 * Definitions for SH4 FPU operations
11 */
12
13#ifndef __CPU_SH4_FPU_H
14#define __CPU_SH4_FPU_H
15
16#define FPSCR_ENABLE_MASK 0x00000f80UL
17
18#define FPSCR_FMOV_DOUBLE (1<<1)
19
20#define FPSCR_CAUSE_INEXACT (1<<12)
21#define FPSCR_CAUSE_UNDERFLOW (1<<13)
22#define FPSCR_CAUSE_OVERFLOW (1<<14)
23#define FPSCR_CAUSE_DIVZERO (1<<15)
24#define FPSCR_CAUSE_INVALID (1<<16)
25#define FPSCR_CAUSE_ERROR (1<<17)
26
27#define FPSCR_DBL_PRECISION (1<<19)
28#define FPSCR_ROUNDING_MODE(x) ((x >> 20) & 3)
29#define FPSCR_RM_NEAREST (0)
30#define FPSCR_RM_ZERO (1)
31
32#endif
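
The FPSCR field macros in the new cpu-sh4/fpu.h are plain bit tests and shifts; the short host-side sketch below (illustration only — the sample FPSCR value is invented, and the macros are copied from the header above) shows how FPSCR_ROUNDING_MODE() and a cause bit decompose a status word.

#include <stdio.h>

#define FPSCR_CAUSE_DIVZERO	(1<<15)
#define FPSCR_DBL_PRECISION	(1<<19)
#define FPSCR_ROUNDING_MODE(x)	((x >> 20) & 3)
#define FPSCR_RM_NEAREST	(0)
#define FPSCR_RM_ZERO		(1)

int main(void)
{
	/* Invented FPSCR snapshot: double precision, round-to-zero,
	   with the divide-by-zero cause bit latched. */
	unsigned long fpscr = FPSCR_DBL_PRECISION | (1UL << 20) | FPSCR_CAUSE_DIVZERO;

	printf("rounding: %s\n",
	       FPSCR_ROUNDING_MODE(fpscr) == FPSCR_RM_ZERO ? "to zero" : "to nearest");
	printf("divide-by-zero cause: %s\n",
	       (fpscr & FPSCR_CAUSE_DIVZERO) ? "set" : "clear");
	return 0;
}
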
diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h
index dc1d32a86374..1ac10b9a078f 100644
--- a/include/asm-sh/cpu-sh4/freq.h
+++ b/include/asm-sh/cpu-sh4/freq.h
@@ -16,7 +16,8 @@
16#define SCLKACR 0xa4150008 16#define SCLKACR 0xa4150008
17#define SCLKBCR 0xa415000c 17#define SCLKBCR 0xa415000c
18#define IrDACLKCR 0xa4150010 18#define IrDACLKCR 0xa4150010
19#elif defined(CONFIG_CPU_SUBTYPE_SH7780) 19#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
20 defined(CONFIG_CPU_SUBTYPE_SH7780)
20#define FRQCR 0xffc80000 21#define FRQCR 0xffc80000
21#elif defined(CONFIG_CPU_SUBTYPE_SH7785) 22#elif defined(CONFIG_CPU_SUBTYPE_SH7785)
22#define FRQCR0 0xffc80000 23#define FRQCR0 0xffc80000
diff --git a/include/asm-sh/cpu-sh4/mmu_context.h b/include/asm-sh/cpu-sh4/mmu_context.h
index 979acddc0f8e..9ea8eb27b18e 100644
--- a/include/asm-sh/cpu-sh4/mmu_context.h
+++ b/include/asm-sh/cpu-sh4/mmu_context.h
@@ -22,12 +22,20 @@
22#define MMU_UTLB_ADDRESS_ARRAY 0xF6000000 22#define MMU_UTLB_ADDRESS_ARRAY 0xF6000000
23#define MMU_PAGE_ASSOC_BIT 0x80 23#define MMU_PAGE_ASSOC_BIT 0x80
24 24
25#define MMUCR_TI (1<<2)
26
25#ifdef CONFIG_X2TLB 27#ifdef CONFIG_X2TLB
26#define MMUCR_ME (1 << 7) 28#define MMUCR_ME (1 << 7)
27#else 29#else
28#define MMUCR_ME (0) 30#define MMUCR_ME (0)
29#endif 31#endif
30 32
33#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
34#define MMUCR_SE (1 << 4)
35#else
36#define MMUCR_SE (0)
37#endif
38
31#ifdef CONFIG_SH_STORE_QUEUES 39#ifdef CONFIG_SH_STORE_QUEUES
32#define MMUCR_SQMD (1 << 9) 40#define MMUCR_SQMD (1 << 9)
33#else 41#else
@@ -35,7 +43,7 @@
35#endif 43#endif
36 44
37#define MMU_NTLB_ENTRIES 64 45#define MMU_NTLB_ENTRIES 64
38#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME) 46#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME|MMUCR_SE)
39 47
40#define MMU_ITLB_DATA_ARRAY 0xF3000000 48#define MMU_ITLB_DATA_ARRAY 0xF3000000
41#define MMU_UTLB_DATA_ARRAY 0xF7000000 49#define MMU_UTLB_DATA_ARRAY 0xF7000000
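
With this hunk, MMU_CONTROL_INIT is assembled from config-dependent pieces rather than being a fixed constant. A quick sketch of the arithmetic, assuming (purely for illustration) that X2TLB, the store queues and the ST40 32-bit space-extension bit are all enabled:

#include <stdio.h>

/* Values from the header; which ones are non-zero depends on the kernel config. */
#define MMUCR_ME	(1 << 7)	/* assumes CONFIG_X2TLB=y */
#define MMUCR_SE	(1 << 4)	/* assumes CONFIG_32BIT + CONFIG_CPU_SUBTYPE_ST40 */
#define MMUCR_SQMD	(1 << 9)	/* assumes CONFIG_SH_STORE_QUEUES=y */

int main(void)
{
	/* With every optional bit configured in: 0x05|0x200|0x80|0x10 = 0x295.
	   With all of them configured out it degenerates to the old 0x05. */
	unsigned int init = 0x05 | MMUCR_SQMD | MMUCR_ME | MMUCR_SE;

	printf("MMU_CONTROL_INIT = %#x\n", init);
	return 0;
}
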
diff --git a/include/asm-sh/cpu-sh4/rtc.h b/include/asm-sh/cpu-sh4/rtc.h
new file mode 100644
index 000000000000..f3d0f53275e4
--- /dev/null
+++ b/include/asm-sh/cpu-sh4/rtc.h
@@ -0,0 +1,8 @@
1#ifndef __ASM_SH_CPU_SH4_RTC_H
2#define __ASM_SH_CPU_SH4_RTC_H
3
4#define rtc_reg_size sizeof(u32)
5#define RTC_BIT_INVERTED 0x40 /* bug on SH7750, SH7750S */
6#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
7
8#endif /* __ASM_SH_CPU_SH4_RTC_H */
diff --git a/include/asm-sh/cpu-sh5/addrspace.h b/include/asm-sh/cpu-sh5/addrspace.h
new file mode 100644
index 000000000000..dc36b9a03af6
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/addrspace.h
@@ -0,0 +1,11 @@
1#ifndef __ASM_SH_CPU_SH5_ADDRSPACE_H
2#define __ASM_SH_CPU_SH5_ADDRSPACE_H
3
4#define PHYS_PERIPHERAL_BLOCK 0x09000000
5#define PHYS_DMAC_BLOCK 0x0e000000
6#define PHYS_PCI_BLOCK 0x60000000
7#define PHYS_EMI_BLOCK 0xff000000
8
9/* No segmentation.. */
10
11#endif /* __ASM_SH_CPU_SH5_ADDRSPACE_H */
diff --git a/include/asm-sh/cpu-sh5/cache.h b/include/asm-sh/cpu-sh5/cache.h
new file mode 100644
index 000000000000..ed050ab526f2
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/cache.h
@@ -0,0 +1,97 @@
1#ifndef __ASM_SH_CPU_SH5_CACHE_H
2#define __ASM_SH_CPU_SH5_CACHE_H
3
4/*
5 * include/asm-sh/cpu-sh5/cache.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 * Copyright (C) 2003, 2004 Paul Mundt
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14
15#define L1_CACHE_SHIFT 5
16
17/* Valid and Dirty bits */
18#define SH_CACHE_VALID (1LL<<0)
19#define SH_CACHE_UPDATED (1LL<<57)
20
21/* Unimplemented compat bits.. */
22#define SH_CACHE_COMBINED 0
23#define SH_CACHE_ASSOC 0
24
25/* Cache flags */
26#define SH_CACHE_MODE_WT (1LL<<0)
27#define SH_CACHE_MODE_WB (1LL<<1)
28
29/*
30 * Control Registers.
31 */
32#define ICCR_BASE 0x01600000 /* Instruction Cache Control Register */
33#define ICCR_REG0 0 /* Register 0 offset */
34#define ICCR_REG1 1 /* Register 1 offset */
35#define ICCR0 ICCR_BASE+ICCR_REG0
36#define ICCR1 ICCR_BASE+ICCR_REG1
37
38#define ICCR0_OFF 0x0 /* Set ICACHE off */
39#define ICCR0_ON 0x1 /* Set ICACHE on */
40#define ICCR0_ICI 0x2 /* Invalidate all in IC */
41
42#define ICCR1_NOLOCK 0x0 /* Set No Locking */
43
44#define OCCR_BASE 0x01E00000 /* Operand Cache Control Register */
45#define OCCR_REG0 0 /* Register 0 offset */
46#define OCCR_REG1 1 /* Register 1 offset */
47#define OCCR0 OCCR_BASE+OCCR_REG0
48#define OCCR1 OCCR_BASE+OCCR_REG1
49
50#define OCCR0_OFF 0x0 /* Set OCACHE off */
51#define OCCR0_ON 0x1 /* Set OCACHE on */
52#define OCCR0_OCI 0x2 /* Invalidate all in OC */
53#define OCCR0_WT 0x4 /* Set OCACHE in WT Mode */
54#define OCCR0_WB 0x0 /* Set OCACHE in WB Mode */
55
56#define OCCR1_NOLOCK 0x0 /* Set No Locking */
57
58/*
59 * SH-5
60 * A bit of description here, for neff=32.
61 *
62 * |<--- tag (19 bits) --->|
63 * +-----------------------------+-----------------+------+----------+------+
64 * | | | ways |set index |offset|
65 * +-----------------------------+-----------------+------+----------+------+
66 * ^ 2 bits 8 bits 5 bits
67 * +- Bit 31
68 *
69 * Cacheline size is based on offset: 5 bits = 32 bytes per line
70 * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
71 * have a broader space for registers. These are outlined by
72 * CACHE_?C_*_STEP below.
73 *
74 */
75
76/* Instruction cache */
77#define CACHE_IC_ADDRESS_ARRAY 0x01000000
78
79/* Operand Cache */
80#define CACHE_OC_ADDRESS_ARRAY 0x01800000
81
82/* These declarations relate to cache 'synonyms' in the operand cache. A
83 'synonym' occurs where effective address bits overlap between those used for
84 indexing the cache sets and those passed to the MMU for translation. In the
85 case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */
86
87#define CACHE_OC_N_SYNBITS 1 /* Number of synonym bits */
88#define CACHE_OC_SYN_SHIFT 12
89/* Mask to select synonym bit(s) */
90#define CACHE_OC_SYN_MASK (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
91
92/*
93 * Instruction cache can't be invalidated based on physical addresses.
94 * No Instruction Cache defines required, then.
95 */
96
97#endif /* __ASM_SH_CPU_SH5_CACHE_H */
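
The layout comment in this header describes a 5-bit line offset, an 8-bit set index and a single synonym bit at bit 12 — the lowest effective-address bit that is both part of the set index and subject to 4k-page translation. The host-side sketch below, assuming that neff=32 layout and an arbitrary example address, derives the three fields with the CACHE_OC_SYN_* constants defined above:

#include <stdio.h>

#define CACHE_OC_N_SYNBITS	1
#define CACHE_OC_SYN_SHIFT	12
#define CACHE_OC_SYN_MASK	(((1UL << CACHE_OC_N_SYNBITS) - 1) << CACHE_OC_SYN_SHIFT)

int main(void)
{
	unsigned long eaddr  = 0x0040137cUL;		/* arbitrary example address */
	unsigned long offset = eaddr & 0x1f;		/* 5 offset bits: 32-byte lines */
	unsigned long set    = (eaddr >> 5) & 0xff;	/* 8 set-index bits */
	unsigned long syn    = (eaddr & CACHE_OC_SYN_MASK) >> CACHE_OC_SYN_SHIFT;

	/* Bit 12 lies inside the set index but above the 4k page offset, so two
	   virtual mappings of the same physical page can select different sets:
	   the "synonym" the comment above describes. */
	printf("offset=%lu set=%lu synonym-bit=%lu\n", offset, set, syn);
	return 0;
}
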
diff --git a/include/asm-sh/cpu-sh5/cacheflush.h b/include/asm-sh/cpu-sh5/cacheflush.h
new file mode 100644
index 000000000000..98edb5b1da32
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/cacheflush.h
@@ -0,0 +1,35 @@
1#ifndef __ASM_SH_CPU_SH5_CACHEFLUSH_H
2#define __ASM_SH_CPU_SH5_CACHEFLUSH_H
3
4#ifndef __ASSEMBLY__
5
6#include <asm/page.h>
7
8struct vm_area_struct;
9struct page;
10struct mm_struct;
11
12extern void flush_cache_all(void);
13extern void flush_cache_mm(struct mm_struct *mm);
14extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
15extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
16 unsigned long end);
17extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
18extern void flush_dcache_page(struct page *pg);
19extern void flush_icache_range(unsigned long start, unsigned long end);
20extern void flush_icache_user_range(struct vm_area_struct *vma,
21 struct page *page, unsigned long addr,
22 int len);
23
24#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
25
26#define flush_dcache_mmap_lock(mapping) do { } while (0)
27#define flush_dcache_mmap_unlock(mapping) do { } while (0)
28
29#define flush_icache_page(vma, page) do { } while (0)
30#define p3_cache_init() do { } while (0)
31
32#endif /* __ASSEMBLY__ */
33
34#endif /* __ASM_SH_CPU_SH5_CACHEFLUSH_H */
35
diff --git a/include/asm-sh/cpu-sh5/dma.h b/include/asm-sh/cpu-sh5/dma.h
new file mode 100644
index 000000000000..7bf6bb3d35ed
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/dma.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH_CPU_SH5_DMA_H
2#define __ASM_SH_CPU_SH5_DMA_H
3
4/* Nothing yet */
5
6#endif /* __ASM_SH_CPU_SH5_DMA_H */
diff --git a/include/asm-sh/cpu-sh5/irq.h b/include/asm-sh/cpu-sh5/irq.h
new file mode 100644
index 000000000000..f0f0756e6e84
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/irq.h
@@ -0,0 +1,117 @@
1#ifndef __ASM_SH_CPU_SH5_IRQ_H
2#define __ASM_SH_CPU_SH5_IRQ_H
3
4/*
5 * include/asm-sh/cpu-sh5/irq.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14
15/*
16 * Encoded IRQs are not considered worth supporting.
17 * Main reason is that there's no per-encoded-interrupt
18 * enable/disable mechanism (as there was in SH3/4).
19 * An all enabled/all disabled is worth only if there's
20 * a cascaded IC to disable/enable/ack on. Until such
21 * IC is available there's no such support.
22 *
23 * Presumably Encoded IRQs may use extra IRQs beyond 64,
24 * below. Some logic must be added to cope with IRQ_IRL?
25 * in an exclusive way.
26 *
27 * Priorities are set at Platform level, when IRQ_IRL0-3
28 * are set to 0 Encoding is allowed. Otherwise it's not
29 * allowed.
30 */
31
32/* Independent IRQs */
33#define IRQ_IRL0 0
34#define IRQ_IRL1 1
35#define IRQ_IRL2 2
36#define IRQ_IRL3 3
37
38#define IRQ_INTA 4
39#define IRQ_INTB 5
40#define IRQ_INTC 6
41#define IRQ_INTD 7
42
43#define IRQ_SERR 12
44#define IRQ_ERR 13
45#define IRQ_PWR3 14
46#define IRQ_PWR2 15
47#define IRQ_PWR1 16
48#define IRQ_PWR0 17
49
50#define IRQ_DMTE0 18
51#define IRQ_DMTE1 19
52#define IRQ_DMTE2 20
53#define IRQ_DMTE3 21
54#define IRQ_DAERR 22
55
56#define IRQ_TUNI0 32
57#define IRQ_TUNI1 33
58#define IRQ_TUNI2 34
59#define IRQ_TICPI2 35
60
61#define IRQ_ATI 36
62#define IRQ_PRI 37
63#define IRQ_CUI 38
64
65#define IRQ_ERI 39
66#define IRQ_RXI 40
67#define IRQ_BRI 41
68#define IRQ_TXI 42
69
70#define IRQ_ITI 63
71
72#define NR_INTC_IRQS 64
73
74#ifdef CONFIG_SH_CAYMAN
75#define NR_EXT_IRQS 32
76#define START_EXT_IRQS 64
77
78/* PCI bus 2 uses encoded external interrupts on the Cayman board */
79#define IRQ_P2INTA (START_EXT_IRQS + (3*8) + 0)
80#define IRQ_P2INTB (START_EXT_IRQS + (3*8) + 1)
81#define IRQ_P2INTC (START_EXT_IRQS + (3*8) + 2)
82#define IRQ_P2INTD (START_EXT_IRQS + (3*8) + 3)
83
84#define I8042_KBD_IRQ (START_EXT_IRQS + 2)
85#define I8042_AUX_IRQ (START_EXT_IRQS + 6)
86
87#define IRQ_CFCARD (START_EXT_IRQS + 7)
88#define IRQ_PCMCIA (0)
89
90#else
91#define NR_EXT_IRQS 0
92#endif
93
94/* Default IRQs, fixed */
95#define TIMER_IRQ IRQ_TUNI0
96#define RTC_IRQ IRQ_CUI
97
98/* Default Priorities, Platform may choose differently */
99#define NO_PRIORITY 0 /* Disabled */
100#define TIMER_PRIORITY 2
101#define RTC_PRIORITY TIMER_PRIORITY
102#define SCIF_PRIORITY 3
103#define INTD_PRIORITY 3
104#define IRL3_PRIORITY 4
105#define INTC_PRIORITY 6
106#define IRL2_PRIORITY 7
107#define INTB_PRIORITY 9
108#define IRL1_PRIORITY 10
109#define INTA_PRIORITY 12
110#define IRL0_PRIORITY 13
111#define TOP_PRIORITY 15
112
113extern int intc_evt_to_irq[(0xE20/0x20)+1];
114int intc_irq_describe(char* p, int irq);
115extern int platform_int_priority[NR_INTC_IRQS];
116
117#endif /* __ASM_SH_CPU_SH5_IRQ_H */
diff --git a/include/asm-sh/cpu-sh5/mmu_context.h b/include/asm-sh/cpu-sh5/mmu_context.h
new file mode 100644
index 000000000000..df857fc09960
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/mmu_context.h
@@ -0,0 +1,27 @@
1#ifndef __ASM_SH_CPU_SH5_MMU_CONTEXT_H
2#define __ASM_SH_CPU_SH5_MMU_CONTEXT_H
3
4/* Common defines */
5#define TLB_STEP 0x00000010
6#define TLB_PTEH 0x00000000
7#define TLB_PTEL 0x00000008
8
9/* PTEH defines */
10#define PTEH_ASID_SHIFT 2
11#define PTEH_VALID 0x0000000000000001
12#define PTEH_SHARED 0x0000000000000002
13#define PTEH_MATCH_ASID 0x00000000000003ff
14
15#ifndef __ASSEMBLY__
16/* This has to be a common function because the next location to fill
17 * information is shared. */
18extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
19
20/* Profiling counter. */
21#ifdef CONFIG_SH64_PROC_TLB
22extern unsigned long long calls_to_do_fast_page_fault;
23#endif
24
25#endif /* __ASSEMBLY__ */
26
27#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */
diff --git a/include/asm-sh/cpu-sh5/registers.h b/include/asm-sh/cpu-sh5/registers.h
new file mode 100644
index 000000000000..6664ea6f1566
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/registers.h
@@ -0,0 +1,106 @@
1#ifndef __ASM_SH_CPU_SH5_REGISTERS_H
2#define __ASM_SH_CPU_SH5_REGISTERS_H
3
4/*
5 * include/asm-sh/cpu-sh5/registers.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 * Copyright (C) 2004 Richard Curnow
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14
15#ifdef __ASSEMBLY__
16/* =====================================================================
17**
18** Section 1: acts on assembly sources pre-processed by GPP ( <source.S>).
19** Assigns symbolic names to control & target registers.
20*/
21
22/*
23 * Define some useful aliases for control registers.
24 */
25#define SR cr0
26#define SSR cr1
27#define PSSR cr2
28 /* cr3 UNDEFINED */
29#define INTEVT cr4
30#define EXPEVT cr5
31#define PEXPEVT cr6
32#define TRA cr7
33#define SPC cr8
34#define PSPC cr9
35#define RESVEC cr10
36#define VBR cr11
37 /* cr12 UNDEFINED */
38#define TEA cr13
39 /* cr14-cr15 UNDEFINED */
40#define DCR cr16
41#define KCR0 cr17
42#define KCR1 cr18
43 /* cr19-cr31 UNDEFINED */
44 /* cr32-cr61 RESERVED */
45#define CTC cr62
46#define USR cr63
47
48/*
49 * ABI dependent registers (general purpose set)
50 */
51#define RET r2
52#define ARG1 r2
53#define ARG2 r3
54#define ARG3 r4
55#define ARG4 r5
56#define ARG5 r6
57#define ARG6 r7
58#define SP r15
59#define LINK r18
60#define ZERO r63
61
62/*
63 * Status register defines: used only by assembly sources (and
64 * syntax independent)
65 */
66#define SR_RESET_VAL 0x0000000050008000
67#define SR_HARMLESS 0x00000000500080f0 /* Write ignores for most */
68#define SR_ENABLE_FPU 0xffffffffffff7fff /* AND with this */
69
70#if defined (CONFIG_SH64_SR_WATCH)
71#define SR_ENABLE_MMU 0x0000000084000000 /* OR with this */
72#else
73#define SR_ENABLE_MMU 0x0000000080000000 /* OR with this */
74#endif
75
76#define SR_UNBLOCK_EXC 0xffffffffefffffff /* AND with this */
77#define SR_BLOCK_EXC 0x0000000010000000 /* OR with this */
78
79#else /* Not __ASSEMBLY__ syntax */
80
81/*
82** Stringify reg. name
83*/
84#define __str(x) #x
85
86/* Stringify control register names for use in inline assembly */
87#define __SR __str(SR)
88#define __SSR __str(SSR)
89#define __PSSR __str(PSSR)
90#define __INTEVT __str(INTEVT)
91#define __EXPEVT __str(EXPEVT)
92#define __PEXPEVT __str(PEXPEVT)
93#define __TRA __str(TRA)
94#define __SPC __str(SPC)
95#define __PSPC __str(PSPC)
96#define __RESVEC __str(RESVEC)
97#define __VBR __str(VBR)
98#define __TEA __str(TEA)
99#define __DCR __str(DCR)
100#define __KCR0 __str(KCR0)
101#define __KCR1 __str(KCR1)
102#define __CTC __str(CTC)
103#define __USR __str(USR)
104
105#endif /* __ASSEMBLY__ */
106#endif /* __ASM_SH_CPU_SH5_REGISTERS_H */
diff --git a/include/asm-sh/cpu-sh5/rtc.h b/include/asm-sh/cpu-sh5/rtc.h
new file mode 100644
index 000000000000..12ea0ed144e1
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/rtc.h
@@ -0,0 +1,8 @@
1#ifndef __ASM_SH_CPU_SH5_RTC_H
2#define __ASM_SH_CPU_SH5_RTC_H
3
4#define rtc_reg_size sizeof(u32)
5#define RTC_BIT_INVERTED 0 /* The SH-5 RTC is surprisingly sane! */
6#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
7
8#endif /* __ASM_SH_CPU_SH5_RTC_H */
diff --git a/include/asm-sh/cpu-sh5/timer.h b/include/asm-sh/cpu-sh5/timer.h
new file mode 100644
index 000000000000..88da9b341a36
--- /dev/null
+++ b/include/asm-sh/cpu-sh5/timer.h
@@ -0,0 +1,4 @@
1#ifndef __ASM_SH_CPU_SH5_TIMER_H
2#define __ASM_SH_CPU_SH5_TIMER_H
3
4#endif /* __ASM_SH_CPU_SH5_TIMER_H */
diff --git a/include/asm-sh/delay.h b/include/asm-sh/delay.h
index db599b2a5a9c..031db84f2aa1 100644
--- a/include/asm-sh/delay.h
+++ b/include/asm-sh/delay.h
@@ -6,7 +6,7 @@
6 * 6 *
7 * Delay routines calling functions in arch/sh/lib/delay.c 7 * Delay routines calling functions in arch/sh/lib/delay.c
8 */ 8 */
9 9
10extern void __bad_udelay(void); 10extern void __bad_udelay(void);
11extern void __bad_ndelay(void); 11extern void __bad_ndelay(void);
12 12
@@ -15,13 +15,17 @@ extern void __ndelay(unsigned long nsecs);
15extern void __const_udelay(unsigned long usecs); 15extern void __const_udelay(unsigned long usecs);
16extern void __delay(unsigned long loops); 16extern void __delay(unsigned long loops);
17 17
18#ifdef CONFIG_SUPERH32
18#define udelay(n) (__builtin_constant_p(n) ? \ 19#define udelay(n) (__builtin_constant_p(n) ? \
19 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \ 20 ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
20 __udelay(n)) 21 __udelay(n))
21 22
22
23#define ndelay(n) (__builtin_constant_p(n) ? \ 23#define ndelay(n) (__builtin_constant_p(n) ? \
24 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ 24 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
25 __ndelay(n)) 25 __ndelay(n))
26#else
27extern void udelay(unsigned long usecs);
28extern void ndelay(unsigned long nsecs);
29#endif
26 30
27#endif /* __ASM_SH_DELAY_H */ 31#endif /* __ASM_SH_DELAY_H */
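
On SUPERH32 the udelay() macro uses __builtin_constant_p to route compile-time constant delays (up to 20000us) through __const_udelay() and everything else through __udelay(); __bad_udelay() is intentionally left without a definition so an over-large constant delay fails at link time. The userspace mock below, with stub functions standing in for the real arch/sh/lib/delay.c routines, makes the dispatch visible (the stubs are illustrative, not the kernel implementations):

#include <stdio.h>

/* Stand-ins for the real arch/sh/lib/delay.c routines, only to trace dispatch. */
static void __udelay(unsigned long usecs)        { printf("__udelay(%lu)\n", usecs); }
static void __const_udelay(unsigned long xloops) { printf("__const_udelay(%#lx)\n", xloops); }
static void __bad_udelay(void)                   { printf("__bad_udelay()\n"); }

#define udelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
	__udelay(n))

int main(void)
{
	unsigned long runtime = 150;

	udelay(100);		/* literal <= 20000: routed to __const_udelay() */
	udelay(runtime);	/* not a compile-time constant: routed to __udelay() */
	return 0;
}
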
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index fcea067f7a9c..22cc419389fe 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -8,11 +8,6 @@
8 8
9extern struct bus_type pci_bus_type; 9extern struct bus_type pci_bus_type;
10 10
11/* arch/sh/mm/consistent.c */
12extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
13extern void consistent_free(void *vaddr, size_t size);
14extern void consistent_sync(void *vaddr, size_t size, int direction);
15
16#define dma_supported(dev, mask) (1) 11#define dma_supported(dev, mask) (1)
17 12
18static inline int dma_set_mask(struct device *dev, u64 mask) 13static inline int dma_set_mask(struct device *dev, u64 mask)
@@ -25,44 +20,19 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
25 return 0; 20 return 0;
26} 21}
27 22
28static inline void *dma_alloc_coherent(struct device *dev, size_t size, 23void *dma_alloc_coherent(struct device *dev, size_t size,
29 dma_addr_t *dma_handle, gfp_t flag) 24 dma_addr_t *dma_handle, gfp_t flag);
30{
31 if (sh_mv.mv_consistent_alloc) {
32 void *ret;
33 25
34 ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag); 26void dma_free_coherent(struct device *dev, size_t size,
35 if (ret != NULL) 27 void *vaddr, dma_addr_t dma_handle);
36 return ret;
37 }
38
39 return consistent_alloc(flag, size, dma_handle);
40}
41
42static inline void dma_free_coherent(struct device *dev, size_t size,
43 void *vaddr, dma_addr_t dma_handle)
44{
45 if (sh_mv.mv_consistent_free) {
46 int ret;
47
48 ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
49 if (ret == 0)
50 return;
51 }
52 28
53 consistent_free(vaddr, size); 29void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
54} 30 enum dma_data_direction dir);
55 31
56#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 32#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
57#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 33#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
58#define dma_is_consistent(d, h) (1) 34#define dma_is_consistent(d, h) (1)
59 35
60static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
61 enum dma_data_direction dir)
62{
63 consistent_sync(vaddr, size, (int)dir);
64}
65
66static inline dma_addr_t dma_map_single(struct device *dev, 36static inline dma_addr_t dma_map_single(struct device *dev,
67 void *ptr, size_t size, 37 void *ptr, size_t size,
68 enum dma_data_direction dir) 38 enum dma_data_direction dir)
@@ -205,4 +175,18 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
205{ 175{
206 return dma_addr == 0; 176 return dma_addr == 0;
207} 177}
178
179#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
180
181extern int
182dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
183 dma_addr_t device_addr, size_t size, int flags);
184
185extern void
186dma_release_declared_memory(struct device *dev);
187
188extern void *
189dma_mark_declared_memory_occupied(struct device *dev,
190 dma_addr_t device_addr, size_t size);
191
208#endif /* __ASM_SH_DMA_MAPPING_H */ 192#endif /* __ASM_SH_DMA_MAPPING_H */
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h
index 12cc4b392bf0..05092da1aa59 100644
--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h
@@ -5,7 +5,7 @@
5#include <asm/ptrace.h> 5#include <asm/ptrace.h>
6#include <asm/user.h> 6#include <asm/user.h>
7 7
8/* SH relocation types */ 8/* SH (particularly SHcompact) relocation types */
9#define R_SH_NONE 0 9#define R_SH_NONE 0
10#define R_SH_DIR32 1 10#define R_SH_DIR32 1
11#define R_SH_REL32 2 11#define R_SH_REL32 2
@@ -43,6 +43,11 @@
43#define R_SH_RELATIVE 165 43#define R_SH_RELATIVE 165
44#define R_SH_GOTOFF 166 44#define R_SH_GOTOFF 166
45#define R_SH_GOTPC 167 45#define R_SH_GOTPC 167
46/* SHmedia relocs */
47#define R_SH_IMM_LOW16 246
48#define R_SH_IMM_LOW16_PCREL 247
49#define R_SH_IMM_MEDLOW16 248
50#define R_SH_IMM_MEDLOW16_PCREL 249
46/* Keep this the last entry. */ 51/* Keep this the last entry. */
47#define R_SH_NUM 256 52#define R_SH_NUM 256
48 53
@@ -58,11 +63,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
58typedef struct user_fpu_struct elf_fpregset_t; 63typedef struct user_fpu_struct elf_fpregset_t;
59 64
60/* 65/*
61 * This is used to ensure we don't load something for the wrong architecture.
62 */
63#define elf_check_arch(x) ( (x)->e_machine == EM_SH )
64
65/*
66 * These are used to set parameters in the core dumps. 66 * These are used to set parameters in the core dumps.
67 */ 67 */
68#define ELF_CLASS ELFCLASS32 68#define ELF_CLASS ELFCLASS32
@@ -73,6 +73,12 @@ typedef struct user_fpu_struct elf_fpregset_t;
73#endif 73#endif
74#define ELF_ARCH EM_SH 74#define ELF_ARCH EM_SH
75 75
76#ifdef __KERNEL__
77/*
78 * This is used to ensure we don't load something for the wrong architecture.
79 */
80#define elf_check_arch(x) ( (x)->e_machine == EM_SH )
81
76#define USE_ELF_CORE_DUMP 82#define USE_ELF_CORE_DUMP
77#define ELF_EXEC_PAGESIZE PAGE_SIZE 83#define ELF_EXEC_PAGESIZE PAGE_SIZE
78 84
@@ -83,7 +89,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
83 89
84#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) 90#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
85 91
86
87#define ELF_CORE_COPY_REGS(_dest,_regs) \ 92#define ELF_CORE_COPY_REGS(_dest,_regs) \
88 memcpy((char *) &_dest, (char *) _regs, \ 93 memcpy((char *) &_dest, (char *) _regs, \
89 sizeof(struct pt_regs)); 94 sizeof(struct pt_regs));
@@ -101,16 +106,38 @@ typedef struct user_fpu_struct elf_fpregset_t;
101 For the moment, we have only optimizations for the Intel generations, 106 For the moment, we have only optimizations for the Intel generations,
102 but that could change... */ 107 but that could change... */
103 108
104#define ELF_PLATFORM (NULL) 109#define ELF_PLATFORM (utsname()->machine)
105 110
111#ifdef __SH5__
112#define ELF_PLAT_INIT(_r, load_addr) \
113 do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
114 _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
115 _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
116 _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
117 _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
118 _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
119 _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
120 _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
121 _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
122 _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
123 _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
124 _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
125 _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
126 _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
127 _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
128 _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
129 _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
130 _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
131 _r->sr = SR_FD | SR_MMU; } while (0)
132#else
106#define ELF_PLAT_INIT(_r, load_addr) \ 133#define ELF_PLAT_INIT(_r, load_addr) \
107 do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \ 134 do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
108 _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \ 135 _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
109 _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \ 136 _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
110 _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \ 137 _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \
111 _r->sr = SR_FD; } while (0) 138 _r->sr = SR_FD; } while (0)
139#endif
112 140
113#ifdef __KERNEL__
114#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) 141#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
115struct task_struct; 142struct task_struct;
116extern int dump_task_regs (struct task_struct *, elf_gregset_t *); 143extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
@@ -118,7 +145,6 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
118 145
119#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) 146#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
120#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 147#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
121#endif
122 148
123#ifdef CONFIG_VSYSCALL 149#ifdef CONFIG_VSYSCALL
124/* vDSO has arch_setup_additional_pages */ 150/* vDSO has arch_setup_additional_pages */
@@ -133,12 +159,35 @@ extern void __kernel_vsyscall;
133#define VDSO_BASE ((unsigned long)current->mm->context.vdso) 159#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
134#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x)) 160#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
135 161
162#define VSYSCALL_AUX_ENT \
163 if (vdso_enabled) \
164 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
165#else
166#define VSYSCALL_AUX_ENT
167#endif /* CONFIG_VSYSCALL */
168
169#ifdef CONFIG_SH_FPU
170#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
171#else
172#define FPU_AUX_ENT
173#endif
174
175extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
176
136/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ 177/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
137#define ARCH_DLINFO \ 178#define ARCH_DLINFO \
138do { \ 179do { \
139 if (vdso_enabled) \ 180 /* Optional FPU initialization */ \
140 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ 181 FPU_AUX_ENT; \
182 \
183 /* Optional vsyscall entry */ \
184 VSYSCALL_AUX_ENT; \
185 \
186 /* Cache desc */ \
187 NEW_AUX_ENT(AT_L1I_CACHESHAPE, l1i_cache_shape); \
188 NEW_AUX_ENT(AT_L1D_CACHESHAPE, l1d_cache_shape); \
189 NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape); \
141} while (0) 190} while (0)
142#endif /* CONFIG_VSYSCALL */
143 191
192#endif /* __KERNEL__ */
144#endif /* __ASM_SH_ELF_H */ 193#endif /* __ASM_SH_ELF_H */
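
ARCH_DLINFO now always exports the three cache-shape words (plus the optional FPU control word and vDSO entries) through the ELF auxiliary vector, so userspace can read them back. A minimal sketch, assuming the AT_L1*_CACHESHAPE constants from <elf.h> and glibc's later getauxval() convenience (older systems would walk /proc/self/auxv instead):

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* The encoding of each shape word is arch-defined; this only shows that
	   the ARCH_DLINFO entries are visible from userspace. */
	printf("L1I cache shape: %#lx\n", getauxval(AT_L1I_CACHESHAPE));
	printf("L1D cache shape: %#lx\n", getauxval(AT_L1D_CACHESHAPE));
	printf("L2  cache shape: %#lx\n", getauxval(AT_L2_CACHESHAPE));
	return 0;
}
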
diff --git a/include/asm-sh/fixmap.h b/include/asm-sh/fixmap.h
index 8a566177ad96..721fcc4d5e98 100644
--- a/include/asm-sh/fixmap.h
+++ b/include/asm-sh/fixmap.h
@@ -49,6 +49,7 @@ enum fixed_addresses {
49#define FIX_N_COLOURS 16 49#define FIX_N_COLOURS 16
50 FIX_CMAP_BEGIN, 50 FIX_CMAP_BEGIN,
51 FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, 51 FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
52 FIX_UNCACHED,
52#ifdef CONFIG_HIGHMEM 53#ifdef CONFIG_HIGHMEM
53 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 54 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
54 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, 55 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -73,7 +74,11 @@ extern void __set_fixmap(enum fixed_addresses idx,
73 * the start of the fixmap, and leave one page empty 74 * the start of the fixmap, and leave one page empty
74 * at the top of mem.. 75 * at the top of mem..
75 */ 76 */
77#ifdef CONFIG_SUPERH32
76#define FIXADDR_TOP (P4SEG - PAGE_SIZE) 78#define FIXADDR_TOP (P4SEG - PAGE_SIZE)
79#else
80#define FIXADDR_TOP (0xff000000 - PAGE_SIZE)
81#endif
77#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 82#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
78#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 83#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
79 84
diff --git a/include/asm-sh/flat.h b/include/asm-sh/flat.h
index dc4f5950dafa..0cc800299e06 100644
--- a/include/asm-sh/flat.h
+++ b/include/asm-sh/flat.h
@@ -19,6 +19,6 @@
19#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp) 19#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
20#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp) 20#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
21#define flat_get_relocate_addr(rel) (rel) 21#define flat_get_relocate_addr(rel) (rel)
22#define flat_set_persistent(relval, p) 0 22#define flat_set_persistent(relval, p) ({ (void)p; 0; })
23 23
24#endif /* __ASM_SH_FLAT_H */ 24#endif /* __ASM_SH_FLAT_H */
diff --git a/include/asm-sh/fpu.h b/include/asm-sh/fpu.h
new file mode 100644
index 000000000000..f8429880a270
--- /dev/null
+++ b/include/asm-sh/fpu.h
@@ -0,0 +1,46 @@
1#ifndef __ASM_SH_FPU_H
2#define __ASM_SH_FPU_H
3
4#define SR_FD 0x00008000
5
6#ifndef __ASSEMBLY__
7#include <asm/ptrace.h>
8
9#ifdef CONFIG_SH_FPU
10static inline void release_fpu(struct pt_regs *regs)
11{
12 regs->sr |= SR_FD;
13}
14
15static inline void grab_fpu(struct pt_regs *regs)
16{
17 regs->sr &= ~SR_FD;
18}
19
20struct task_struct;
21
22extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs);
23#else
24#define release_fpu(regs) do { } while (0)
25#define grab_fpu(regs) do { } while (0)
26#define save_fpu(tsk, regs) do { } while (0)
27#endif
28
29extern int do_fpu_inst(unsigned short, struct pt_regs *);
30
31#define unlazy_fpu(tsk, regs) do { \
32 if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \
33 save_fpu(tsk, regs); \
34 } \
35} while (0)
36
37#define clear_fpu(tsk, regs) do { \
38 if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \
39 clear_tsk_thread_flag(tsk, TIF_USEDFPU); \
40 release_fpu(regs); \
41 } \
42} while (0)
43
44#endif /* __ASSEMBLY__ */
45
46#endif /* __ASM_SH_FPU_H */
diff --git a/include/asm-sh/hd64461.h b/include/asm-sh/hd64461.h
index 342ca55a266a..8c1353baf00f 100644
--- a/include/asm-sh/hd64461.h
+++ b/include/asm-sh/hd64461.h
@@ -46,10 +46,10 @@
46/* CPU Data Bus Control Register */ 46/* CPU Data Bus Control Register */
47#define HD64461_SCPUCR (CONFIG_HD64461_IOBASE + 0x04) 47#define HD64461_SCPUCR (CONFIG_HD64461_IOBASE + 0x04)
48 48
49/* Base Adress Register */ 49/* Base Address Register */
50#define HD64461_LCDCBAR (CONFIG_HD64461_IOBASE + 0x1000) 50#define HD64461_LCDCBAR (CONFIG_HD64461_IOBASE + 0x1000)
51 51
52/* Line increment adress */ 52/* Line increment address */
53#define HD64461_LCDCLOR (CONFIG_HD64461_IOBASE + 0x1002) 53#define HD64461_LCDCLOR (CONFIG_HD64461_IOBASE + 0x1002)
54 54
55/* Controls LCD controller */ 55/* Controls LCD controller */
@@ -80,9 +80,9 @@
80#define HD64461_LDR3 (CONFIG_HD64461_IOBASE + 0x101e) 80#define HD64461_LDR3 (CONFIG_HD64461_IOBASE + 0x101e)
81 81
82/* Palette Registers */ 82/* Palette Registers */
83#define HD64461_CPTWAR (CONFIG_HD64461_IOBASE + 0x1030) /* Color Palette Write Adress Register */ 83#define HD64461_CPTWAR (CONFIG_HD64461_IOBASE + 0x1030) /* Color Palette Write Address Register */
84#define HD64461_CPTWDR (CONFIG_HD64461_IOBASE + 0x1032) /* Color Palette Write Data Register */ 84#define HD64461_CPTWDR (CONFIG_HD64461_IOBASE + 0x1032) /* Color Palette Write Data Register */
85#define HD64461_CPTRAR (CONFIG_HD64461_IOBASE + 0x1034) /* Color Palette Read Adress Register */ 85#define HD64461_CPTRAR (CONFIG_HD64461_IOBASE + 0x1034) /* Color Palette Read Address Register */
86#define HD64461_CPTRDR (CONFIG_HD64461_IOBASE + 0x1036) /* Color Palette Read Data Register */ 86#define HD64461_CPTRDR (CONFIG_HD64461_IOBASE + 0x1036) /* Color Palette Read Data Register */
87 87
88#define HD64461_GRDOR (CONFIG_HD64461_IOBASE + 0x1040) /* Display Resolution Offset Register */ 88#define HD64461_GRDOR (CONFIG_HD64461_IOBASE + 0x1040) /* Display Resolution Offset Register */
@@ -97,8 +97,8 @@
97#define HD64461_GRCFGR_COLORDEPTH8 0x01 /* Sets Colordepth 8 for Accelerator */ 97#define HD64461_GRCFGR_COLORDEPTH8 0x01 /* Sets Colordepth 8 for Accelerator */
98 98
99/* Line Drawing Registers */ 99/* Line Drawing Registers */
100#define HD64461_LNSARH (CONFIG_HD64461_IOBASE + 0x1046) /* Line Start Adress Register (H) */ 100#define HD64461_LNSARH (CONFIG_HD64461_IOBASE + 0x1046) /* Line Start Address Register (H) */
101#define HD64461_LNSARL (CONFIG_HD64461_IOBASE + 0x1048) /* Line Start Adress Register (L) */ 101#define HD64461_LNSARL (CONFIG_HD64461_IOBASE + 0x1048) /* Line Start Address Register (L) */
102#define HD64461_LNAXLR (CONFIG_HD64461_IOBASE + 0x104a) /* Axis Pixel Length Register */ 102#define HD64461_LNAXLR (CONFIG_HD64461_IOBASE + 0x104a) /* Axis Pixel Length Register */
103#define HD64461_LNDGR (CONFIG_HD64461_IOBASE + 0x104c) /* Diagonal Register */ 103#define HD64461_LNDGR (CONFIG_HD64461_IOBASE + 0x104c) /* Diagonal Register */
104#define HD64461_LNAXR (CONFIG_HD64461_IOBASE + 0x104e) /* Axial Register */ 104#define HD64461_LNAXR (CONFIG_HD64461_IOBASE + 0x104e) /* Axial Register */
@@ -106,16 +106,16 @@
106#define HD64461_LNMDR (CONFIG_HD64461_IOBASE + 0x1052) /* Line Mode Register */ 106#define HD64461_LNMDR (CONFIG_HD64461_IOBASE + 0x1052) /* Line Mode Register */
107 107
108/* BitBLT Registers */ 108/* BitBLT Registers */
109#define HD64461_BBTSSARH (CONFIG_HD64461_IOBASE + 0x1054) /* Source Start Adress Register (H) */ 109#define HD64461_BBTSSARH (CONFIG_HD64461_IOBASE + 0x1054) /* Source Start Address Register (H) */
110#define HD64461_BBTSSARL (CONFIG_HD64461_IOBASE + 0x1056) /* Source Start Adress Register (L) */ 110#define HD64461_BBTSSARL (CONFIG_HD64461_IOBASE + 0x1056) /* Source Start Address Register (L) */
111#define HD64461_BBTDSARH (CONFIG_HD64461_IOBASE + 0x1058) /* Destination Start Adress Register (H) */ 111#define HD64461_BBTDSARH (CONFIG_HD64461_IOBASE + 0x1058) /* Destination Start Address Register (H) */
112#define HD64461_BBTDSARL (CONFIG_HD64461_IOBASE + 0x105a) /* Destination Start Adress Register (L) */ 112#define HD64461_BBTDSARL (CONFIG_HD64461_IOBASE + 0x105a) /* Destination Start Address Register (L) */
113#define HD64461_BBTDWR (CONFIG_HD64461_IOBASE + 0x105c) /* Destination Block Width Register */ 113#define HD64461_BBTDWR (CONFIG_HD64461_IOBASE + 0x105c) /* Destination Block Width Register */
114#define HD64461_BBTDHR (CONFIG_HD64461_IOBASE + 0x105e) /* Destination Block Height Register */ 114#define HD64461_BBTDHR (CONFIG_HD64461_IOBASE + 0x105e) /* Destination Block Height Register */
115#define HD64461_BBTPARH (CONFIG_HD64461_IOBASE + 0x1060) /* Pattern Start Adress Register (H) */ 115#define HD64461_BBTPARH (CONFIG_HD64461_IOBASE + 0x1060) /* Pattern Start Address Register (H) */
116#define HD64461_BBTPARL (CONFIG_HD64461_IOBASE + 0x1062) /* Pattern Start Adress Register (L) */ 116#define HD64461_BBTPARL (CONFIG_HD64461_IOBASE + 0x1062) /* Pattern Start Address Register (L) */
117#define HD64461_BBTMARH (CONFIG_HD64461_IOBASE + 0x1064) /* Mask Start Adress Register (H) */ 117#define HD64461_BBTMARH (CONFIG_HD64461_IOBASE + 0x1064) /* Mask Start Address Register (H) */
118#define HD64461_BBTMARL (CONFIG_HD64461_IOBASE + 0x1066) /* Mask Start Adress Register (L) */ 118#define HD64461_BBTMARL (CONFIG_HD64461_IOBASE + 0x1066) /* Mask Start Address Register (L) */
119#define HD64461_BBTROPR (CONFIG_HD64461_IOBASE + 0x1068) /* ROP Register */ 119#define HD64461_BBTROPR (CONFIG_HD64461_IOBASE + 0x1068) /* ROP Register */
120#define HD64461_BBTMDR (CONFIG_HD64461_IOBASE + 0x106a) /* BitBLT Mode Register */ 120#define HD64461_BBTMDR (CONFIG_HD64461_IOBASE + 0x106a) /* BitBLT Mode Register */
121 121
diff --git a/include/asm-sh/hs7751rvoip.h b/include/asm-sh/hs7751rvoip.h
deleted file mode 100644
index c4cff9d33927..000000000000
--- a/include/asm-sh/hs7751rvoip.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef __ASM_SH_RENESAS_HS7751RVOIP_H
2#define __ASM_SH_RENESAS_HS7751RVOIP_H
3
4/*
5 * linux/include/asm-sh/hs7751rvoip/hs7751rvoip.h
6 *
7 * Copyright (C) 2000 Atom Create Engineering Co., Ltd.
8 *
9 * Renesas Technology Sales HS7751RVoIP support
10 */
11
12/* Box specific addresses. */
13
14#define PA_BCR 0xa4000000 /* FPGA */
15#define PA_SLICCNTR1 0xa4000006 /* SLIC PIO Control 1 */
16#define PA_SLICCNTR2 0xa4000008 /* SLIC PIO Control 2 */
17#define PA_DMACNTR 0xa400000a /* USB DMA Control */
18#define PA_INPORTR 0xa400000c /* Input Port Register */
19#define PA_OUTPORTR 0xa400000e /* Output Port Reguster */
20#define PA_VERREG 0xa4000014 /* FPGA Version Register */
21
22#define PA_IDE_OFFSET 0x1f0 /* CF IDE Offset */
23
24#define IRLCNTR1 (PA_BCR + 0) /* Interrupt Control Register1 */
25#define IRLCNTR2 (PA_BCR + 2) /* Interrupt Control Register2 */
26#define IRLCNTR3 (PA_BCR + 4) /* Interrupt Control Register3 */
27#define IRLCNTR4 (PA_BCR + 16) /* Interrupt Control Register4 */
28#define IRLCNTR5 (PA_BCR + 18) /* Interrupt Control Register5 */
29
30#define IRQ_PCIETH 6 /* PCI Ethernet IRQ */
31#define IRQ_PCIHUB 7 /* PCI Ethernet Hub IRQ */
32#define IRQ_USBCOM 8 /* USB Comunication IRQ */
33#define IRQ_USBCON 9 /* USB Connect IRQ */
34#define IRQ_USBDMA 10 /* USB DMA IRQ */
35#define IRQ_CFCARD 11 /* CF Card IRQ */
36#define IRQ_PCMCIA 12 /* PCMCIA IRQ */
37#define IRQ_PCISLOT 13 /* PCI Slot #1 IRQ */
38#define IRQ_ONHOOK1 0 /* ON HOOK1 IRQ */
39#define IRQ_OFFHOOK1 1 /* OFF HOOK1 IRQ */
40#define IRQ_ONHOOK2 2 /* ON HOOK2 IRQ */
41#define IRQ_OFFHOOK2 3 /* OFF HOOK2 IRQ */
42#define IRQ_RINGING 4 /* Ringing IRQ */
43#define IRQ_CODEC 5 /* CODEC IRQ */
44
45#define __IO_PREFIX hs7751rvoip
46#include <asm/io_generic.h>
47
48/* arch/sh/boards/renesas/hs7751rvoip/irq.c */
49void init_hs7751rvoip_IRQ(void);
50
51/* arch/sh/boards/renesas/hs7751rvoip/io.c */
52void *hs7751rvoip_ioremap(unsigned long, unsigned long);
53
54#endif /* __ASM_SH_RENESAS_HS7751RVOIP */
diff --git a/include/asm-sh/hw_irq.h b/include/asm-sh/hw_irq.h
index cb0b6c9f7020..c958fdaa0095 100644
--- a/include/asm-sh/hw_irq.h
+++ b/include/asm-sh/hw_irq.h
@@ -33,13 +33,6 @@ struct intc_vect {
33#define INTC_VECT(enum_id, vect) { enum_id, vect } 33#define INTC_VECT(enum_id, vect) { enum_id, vect }
34#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq)) 34#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq))
35 35
36struct intc_prio {
37 intc_enum enum_id;
38 unsigned char priority;
39};
40
41#define INTC_PRIO(enum_id, prio) { enum_id, prio }
42
43struct intc_group { 36struct intc_group {
44 intc_enum enum_id; 37 intc_enum enum_id;
45 intc_enum enum_ids[32]; 38 intc_enum enum_ids[32];
@@ -79,8 +72,6 @@ struct intc_desc {
79 unsigned int nr_vectors; 72 unsigned int nr_vectors;
80 struct intc_group *groups; 73 struct intc_group *groups;
81 unsigned int nr_groups; 74 unsigned int nr_groups;
82 struct intc_prio *priorities;
83 unsigned int nr_priorities;
84 struct intc_mask_reg *mask_regs; 75 struct intc_mask_reg *mask_regs;
85 unsigned int nr_mask_regs; 76 unsigned int nr_mask_regs;
86 struct intc_prio_reg *prio_regs; 77 struct intc_prio_reg *prio_regs;
@@ -92,10 +83,9 @@ struct intc_desc {
92 83
93#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) 84#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a)
94#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \ 85#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \
95 priorities, mask_regs, prio_regs, sense_regs) \ 86 mask_regs, prio_regs, sense_regs) \
96struct intc_desc symbol __initdata = { \ 87struct intc_desc symbol __initdata = { \
97 _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ 88 _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \
98 _INTC_ARRAY(priorities), \
99 _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ 89 _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \
100 _INTC_ARRAY(sense_regs), \ 90 _INTC_ARRAY(sense_regs), \
101 chipname, \ 91 chipname, \
diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h
index 6ed34d8eac5f..94900c089519 100644
--- a/include/asm-sh/io.h
+++ b/include/asm-sh/io.h
@@ -191,6 +191,8 @@ __BUILD_MEMORY_STRING(w, u16)
191 191
192#define mmiowb() wmb() /* synco on SH-4A, otherwise a nop */ 192#define mmiowb() wmb() /* synco on SH-4A, otherwise a nop */
193 193
194#define IO_SPACE_LIMIT 0xffffffff
195
194/* 196/*
195 * This function provides a method for the generic case where a board-specific 197 * This function provides a method for the generic case where a board-specific
196 * ioport_map simply needs to return the port + some arbitrary port base. 198 * ioport_map simply needs to return the port + some arbitrary port base.
@@ -226,6 +228,11 @@ static inline unsigned int ctrl_inl(unsigned long addr)
226 return *(volatile unsigned long*)addr; 228 return *(volatile unsigned long*)addr;
227} 229}
228 230
231static inline unsigned long long ctrl_inq(unsigned long addr)
232{
233 return *(volatile unsigned long long*)addr;
234}
235
229static inline void ctrl_outb(unsigned char b, unsigned long addr) 236static inline void ctrl_outb(unsigned char b, unsigned long addr)
230{ 237{
231 *(volatile unsigned char*)addr = b; 238 *(volatile unsigned char*)addr = b;
@@ -241,49 +248,52 @@ static inline void ctrl_outl(unsigned int b, unsigned long addr)
241 *(volatile unsigned long*)addr = b; 248 *(volatile unsigned long*)addr = b;
242} 249}
243 250
251static inline void ctrl_outq(unsigned long long b, unsigned long addr)
252{
253 *(volatile unsigned long long*)addr = b;
254}
255
244static inline void ctrl_delay(void) 256static inline void ctrl_delay(void)
245{ 257{
258#ifdef P2SEG
246 ctrl_inw(P2SEG); 259 ctrl_inw(P2SEG);
260#endif
247} 261}
248 262
249#define IO_SPACE_LIMIT 0xffffffff 263/* Quad-word real-mode I/O, don't ask.. */
264unsigned long long peek_real_address_q(unsigned long long addr);
265unsigned long long poke_real_address_q(unsigned long long addr,
266 unsigned long long val);
250 267
251#ifdef CONFIG_MMU 268/* arch/sh/mm/ioremap_64.c */
252/* 269unsigned long onchip_remap(unsigned long addr, unsigned long size,
253 * Change virtual addresses to physical addresses and vv. 270 const char *name);
254 * These are trivial on the 1:1 Linux/SuperH mapping 271extern void onchip_unmap(unsigned long vaddr);
255 */
256static inline unsigned long virt_to_phys(volatile void *address)
257{
258 return PHYSADDR(address);
259}
260 272
261static inline void *phys_to_virt(unsigned long address) 273#if !defined(CONFIG_MMU)
262{
263 return (void *)P1SEGADDR(address);
264}
265#else
266#define phys_to_virt(address) ((void *)(address))
267#define virt_to_phys(address) ((unsigned long)(address)) 274#define virt_to_phys(address) ((unsigned long)(address))
275#define phys_to_virt(address) ((void *)(address))
276#else
277#define virt_to_phys(address) (__pa(address))
278#define phys_to_virt(address) (__va(address))
268#endif 279#endif
269 280
270/* 281/*
271 * readX/writeX() are used to access memory mapped devices. On some 282 * On 32-bit SH, we traditionally have the whole physical address space
272 * architectures the memory mapped IO stuff needs to be accessed 283 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
273 * differently. On the x86 architecture, we just read/write the 284 * not need to do anything but place the address in the proper segment.
274 * memory location directly. 285 * This is true for P1 and P2 addresses, as well as some P3 ones.
286 * However, most of the P3 addresses and newer cores using extended
287 * addressing need to map through page tables, so the ioremap()
288 * implementation becomes a bit more complicated.
275 * 289 *
276 * On SH, we traditionally have the whole physical address space mapped 290 * See arch/sh/mm/ioremap.c for additional notes on this.
277 * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not
278 * need to do anything but place the address in the proper segment. This
279 * is true for P1 and P2 addresses, as well as some P3 ones. However,
280 * most of the P3 addresses and newer cores using extended addressing
281 * need to map through page tables, so the ioremap() implementation
282 * becomes a bit more complicated. See arch/sh/mm/ioremap.c for
283 * additional notes on this.
284 * 291 *
285 * We cheat a bit and always return uncachable areas until we've fixed 292 * We cheat a bit and always return uncachable areas until we've fixed
286 * the drivers to handle caching properly. 293 * the drivers to handle caching properly.
294 *
295 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
296 * doesn't exist, so everything must go through page tables.
287 */ 297 */
288#ifdef CONFIG_MMU 298#ifdef CONFIG_MMU
289void __iomem *__ioremap(unsigned long offset, unsigned long size, 299void __iomem *__ioremap(unsigned long offset, unsigned long size,
@@ -297,6 +307,7 @@ void __iounmap(void __iomem *addr);
297static inline void __iomem * 307static inline void __iomem *
298__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) 308__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
299{ 309{
310#ifdef CONFIG_SUPERH32
300 unsigned long last_addr = offset + size - 1; 311 unsigned long last_addr = offset + size - 1;
301 312
302 /* 313 /*
@@ -311,6 +322,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
311 322
312 return (void __iomem *)P2SEGADDR(offset); 323 return (void __iomem *)P2SEGADDR(offset);
313 } 324 }
325#endif
314 326
315 return __ioremap(offset, size, flags); 327 return __ioremap(offset, size, flags);
316} 328}
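
The ctrl_in*/ctrl_out* accessors (now including 64-bit ctrl_inq/ctrl_outq for SH-5) are plain volatile loads and stores, so the usual idiom is a read-modify-write, optionally followed by ctrl_delay(). A minimal kernel-context sketch assuming <asm/io.h>; the register address and enable bit are hypothetical, not part of any real SH register map:

/* Hypothetical on-chip register and enable bit, purely for illustration. */
#define EXAMPLE_REG	0xffc00010UL
#define EXAMPLE_EN	(1 << 0)

static void example_enable_block(void)
{
	unsigned int v;

	v = ctrl_inl(EXAMPLE_REG);	/* volatile 32-bit read */
	v |= EXAMPLE_EN;
	ctrl_outl(v, EXAMPLE_REG);	/* volatile 32-bit write */
	ctrl_delay();			/* dummy read, as done after some register writes */
}
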
diff --git a/include/asm-sh/irqflags.h b/include/asm-sh/irqflags.h
index 9dedc1b693e3..46e71da5be6b 100644
--- a/include/asm-sh/irqflags.h
+++ b/include/asm-sh/irqflags.h
@@ -1,81 +1,11 @@
1#ifndef __ASM_SH_IRQFLAGS_H 1#ifndef __ASM_SH_IRQFLAGS_H
2#define __ASM_SH_IRQFLAGS_H 2#define __ASM_SH_IRQFLAGS_H
3 3
4static inline void raw_local_irq_enable(void) 4#ifdef CONFIG_SUPERH32
5{ 5#include "irqflags_32.h"
6 unsigned long __dummy0, __dummy1; 6#else
7 7#include "irqflags_64.h"
8 __asm__ __volatile__ (
9 "stc sr, %0\n\t"
10 "and %1, %0\n\t"
11#ifdef CONFIG_CPU_HAS_SR_RB
12 "stc r6_bank, %1\n\t"
13 "or %1, %0\n\t"
14#endif 8#endif
15 "ldc %0, sr\n\t"
16 : "=&r" (__dummy0), "=r" (__dummy1)
17 : "1" (~0x000000f0)
18 : "memory"
19 );
20}
21
22static inline void raw_local_irq_disable(void)
23{
24 unsigned long flags;
25
26 __asm__ __volatile__ (
27 "stc sr, %0\n\t"
28 "or #0xf0, %0\n\t"
29 "ldc %0, sr\n\t"
30 : "=&z" (flags)
31 : /* no inputs */
32 : "memory"
33 );
34}
35
36static inline void set_bl_bit(void)
37{
38 unsigned long __dummy0, __dummy1;
39
40 __asm__ __volatile__ (
41 "stc sr, %0\n\t"
42 "or %2, %0\n\t"
43 "and %3, %0\n\t"
44 "ldc %0, sr\n\t"
45 : "=&r" (__dummy0), "=r" (__dummy1)
46 : "r" (0x10000000), "r" (0xffffff0f)
47 : "memory"
48 );
49}
50
51static inline void clear_bl_bit(void)
52{
53 unsigned long __dummy0, __dummy1;
54
55 __asm__ __volatile__ (
56 "stc sr, %0\n\t"
57 "and %2, %0\n\t"
58 "ldc %0, sr\n\t"
59 : "=&r" (__dummy0), "=r" (__dummy1)
60 : "1" (~0x10000000)
61 : "memory"
62 );
63}
64
65static inline unsigned long __raw_local_save_flags(void)
66{
67 unsigned long flags;
68
69 __asm__ __volatile__ (
70 "stc sr, %0\n\t"
71 "and #0xf0, %0\n\t"
72 : "=&z" (flags)
73 : /* no inputs */
74 : "memory"
75 );
76
77 return flags;
78}
79 9
80#define raw_local_save_flags(flags) \ 10#define raw_local_save_flags(flags) \
81 do { (flags) = __raw_local_save_flags(); } while (0) 11 do { (flags) = __raw_local_save_flags(); } while (0)
@@ -92,25 +22,6 @@ static inline int raw_irqs_disabled(void)
92 return raw_irqs_disabled_flags(flags); 22 return raw_irqs_disabled_flags(flags);
93} 23}
94 24
95static inline unsigned long __raw_local_irq_save(void)
96{
97 unsigned long flags, __dummy;
98
99 __asm__ __volatile__ (
100 "stc sr, %1\n\t"
101 "mov %1, %0\n\t"
102 "or #0xf0, %0\n\t"
103 "ldc %0, sr\n\t"
104 "mov %1, %0\n\t"
105 "and #0xf0, %0\n\t"
106 : "=&z" (flags), "=&r" (__dummy)
107 : /* no inputs */
108 : "memory"
109 );
110
111 return flags;
112}
113
114#define raw_local_irq_save(flags) \ 25#define raw_local_irq_save(flags) \
115 do { (flags) = __raw_local_irq_save(); } while (0) 26 do { (flags) = __raw_local_irq_save(); } while (0)
116 27
diff --git a/include/asm-sh/irqflags_32.h b/include/asm-sh/irqflags_32.h
new file mode 100644
index 000000000000..60218f541340
--- /dev/null
+++ b/include/asm-sh/irqflags_32.h
@@ -0,0 +1,99 @@
1#ifndef __ASM_SH_IRQFLAGS_32_H
2#define __ASM_SH_IRQFLAGS_32_H
3
4static inline void raw_local_irq_enable(void)
5{
6 unsigned long __dummy0, __dummy1;
7
8 __asm__ __volatile__ (
9 "stc sr, %0\n\t"
10 "and %1, %0\n\t"
11#ifdef CONFIG_CPU_HAS_SR_RB
12 "stc r6_bank, %1\n\t"
13 "or %1, %0\n\t"
14#endif
15 "ldc %0, sr\n\t"
16 : "=&r" (__dummy0), "=r" (__dummy1)
17 : "1" (~0x000000f0)
18 : "memory"
19 );
20}
21
22static inline void raw_local_irq_disable(void)
23{
24 unsigned long flags;
25
26 __asm__ __volatile__ (
27 "stc sr, %0\n\t"
28 "or #0xf0, %0\n\t"
29 "ldc %0, sr\n\t"
30 : "=&z" (flags)
31 : /* no inputs */
32 : "memory"
33 );
34}
35
36static inline void set_bl_bit(void)
37{
38 unsigned long __dummy0, __dummy1;
39
40 __asm__ __volatile__ (
41 "stc sr, %0\n\t"
42 "or %2, %0\n\t"
43 "and %3, %0\n\t"
44 "ldc %0, sr\n\t"
45 : "=&r" (__dummy0), "=r" (__dummy1)
46 : "r" (0x10000000), "r" (0xffffff0f)
47 : "memory"
48 );
49}
50
51static inline void clear_bl_bit(void)
52{
53 unsigned long __dummy0, __dummy1;
54
55 __asm__ __volatile__ (
56 "stc sr, %0\n\t"
57 "and %2, %0\n\t"
58 "ldc %0, sr\n\t"
59 : "=&r" (__dummy0), "=r" (__dummy1)
60 : "1" (~0x10000000)
61 : "memory"
62 );
63}
64
65static inline unsigned long __raw_local_save_flags(void)
66{
67 unsigned long flags;
68
69 __asm__ __volatile__ (
70 "stc sr, %0\n\t"
71 "and #0xf0, %0\n\t"
72 : "=&z" (flags)
73 : /* no inputs */
74 : "memory"
75 );
76
77 return flags;
78}
79
80static inline unsigned long __raw_local_irq_save(void)
81{
82 unsigned long flags, __dummy;
83
84 __asm__ __volatile__ (
85 "stc sr, %1\n\t"
86 "mov %1, %0\n\t"
87 "or #0xf0, %0\n\t"
88 "ldc %0, sr\n\t"
89 "mov %1, %0\n\t"
90 "and #0xf0, %0\n\t"
91 : "=&z" (flags), "=&r" (__dummy)
92 : /* no inputs */
93 : "memory"
94 );
95
96 return flags;
97}
98
99#endif /* __ASM_SH_IRQFLAGS_32_H */
diff --git a/include/asm-sh/irqflags_64.h b/include/asm-sh/irqflags_64.h
new file mode 100644
index 000000000000..4f6b8a56e7bd
--- /dev/null
+++ b/include/asm-sh/irqflags_64.h
@@ -0,0 +1,85 @@
1#ifndef __ASM_SH_IRQFLAGS_64_H
2#define __ASM_SH_IRQFLAGS_64_H
3
4#include <asm/cpu/registers.h>
5
6#define SR_MASK_LL 0x00000000000000f0LL
7#define SR_BL_LL 0x0000000010000000LL
8
9static inline void raw_local_irq_enable(void)
10{
11 unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL;
12
13 __asm__ __volatile__("getcon " __SR ", %0\n\t"
14 "and %0, %1, %0\n\t"
15 "putcon %0, " __SR "\n\t"
16 : "=&r" (__dummy0)
17 : "r" (__dummy1));
18}
19
20static inline void raw_local_irq_disable(void)
21{
22 unsigned long long __dummy0, __dummy1 = SR_MASK_LL;
23
24 __asm__ __volatile__("getcon " __SR ", %0\n\t"
25 "or %0, %1, %0\n\t"
26 "putcon %0, " __SR "\n\t"
27 : "=&r" (__dummy0)
28 : "r" (__dummy1));
29}
30
31static inline void set_bl_bit(void)
32{
33 unsigned long long __dummy0, __dummy1 = SR_BL_LL;
34
35 __asm__ __volatile__("getcon " __SR ", %0\n\t"
36 "or %0, %1, %0\n\t"
37 "putcon %0, " __SR "\n\t"
38 : "=&r" (__dummy0)
39 : "r" (__dummy1));
40
41}
42
43static inline void clear_bl_bit(void)
44{
45 unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
46
47 __asm__ __volatile__("getcon " __SR ", %0\n\t"
48 "and %0, %1, %0\n\t"
49 "putcon %0, " __SR "\n\t"
50 : "=&r" (__dummy0)
51 : "r" (__dummy1));
52}
53
54static inline unsigned long __raw_local_save_flags(void)
55{
56 unsigned long long __dummy = SR_MASK_LL;
57 unsigned long flags;
58
59 __asm__ __volatile__ (
60 "getcon " __SR ", %0\n\t"
61 "and %0, %1, %0"
62 : "=&r" (flags)
63 : "r" (__dummy));
64
65 return flags;
66}
67
68static inline unsigned long __raw_local_irq_save(void)
69{
70 unsigned long long __dummy0, __dummy1 = SR_MASK_LL;
71 unsigned long flags;
72
73 __asm__ __volatile__ (
74 "getcon " __SR ", %1\n\t"
75 "or %1, r63, %0\n\t"
76 "or %1, %2, %1\n\t"
77 "putcon %1, " __SR "\n\t"
78 "and %0, %2, %0"
79 : "=&r" (flags), "=&r" (__dummy0)
80 : "r" (__dummy1));
81
82 return flags;
83}
84
85#endif /* __ASM_SH_IRQFLAGS_64_H */
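The sh64 variant performs the same operations on a 64-bit SR: SR_MASK_LL gates the interrupt levels, SR_BL_LL is the block (BL) bit, and __raw_local_irq_save() again hands back only the masked-off bits. A minimal arithmetic sketch with a fabricated SR value:

#include <stdio.h>

#define SR_MASK_LL 0x00000000000000f0LL
#define SR_BL_LL   0x0000000010000000LL

int main(void)
{
	unsigned long long sr = 0x0000000040000000ULL;	/* fabricated SR */
	unsigned long long flags;

	flags = sr & SR_MASK_LL;	/* __raw_local_irq_save(): old mask bits */
	sr |= SR_MASK_LL;		/* ...interrupts now blocked */
	sr |= SR_BL_LL;			/* set_bl_bit() */
	printf("saved flags %#llx, SR %#llx\n", flags, sr);

	sr &= ~SR_BL_LL;		/* clear_bl_bit() */
	sr &= ~SR_MASK_LL;		/* raw_local_irq_enable() */
	printf("SR after enable: %#llx\n", sr);
	return 0;
}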
diff --git a/include/asm-sh/machvec.h b/include/asm-sh/machvec.h
index 088698bacf2f..ddb18ad23303 100644
--- a/include/asm-sh/machvec.h
+++ b/include/asm-sh/machvec.h
@@ -56,9 +56,6 @@ struct sh_machine_vector {
56 56
57 void (*mv_heartbeat)(void); 57 void (*mv_heartbeat)(void);
58 58
59 void *(*mv_consistent_alloc)(struct device *, size_t, dma_addr_t *, gfp_t);
60 int (*mv_consistent_free)(struct device *, size_t, void *, dma_addr_t);
61
62 void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); 59 void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size);
63 void (*mv_ioport_unmap)(void __iomem *); 60 void (*mv_ioport_unmap)(void __iomem *);
64}; 61};
diff --git a/include/asm-sh/microdev.h b/include/asm-sh/microdev.h
index 018332a9e590..1aed15856e11 100644
--- a/include/asm-sh/microdev.h
+++ b/include/asm-sh/microdev.h
@@ -17,7 +17,7 @@ extern void microdev_print_fpga_intc_status(void);
17/* 17/*
18 * The following are useful macros for manipulating the interrupt 18 * The following are useful macros for manipulating the interrupt
 19 * controller (INTC) on the CPU-board FPGA. It should be noted that there 19 * controller (INTC) on the CPU-board FPGA. It should be noted that there
20 * is an INTC on the FPGA, and a seperate INTC on the SH4-202 core - 20 * is an INTC on the FPGA, and a separate INTC on the SH4-202 core -
 21 * these are two different things, both of which need to be programmed to 21 * these are two different things, both of which need to be programmed to
22 * correctly route - unfortunately, they have the same name and 22 * correctly route - unfortunately, they have the same name and
23 * abbreviations! 23 * abbreviations!
@@ -25,7 +25,7 @@ extern void microdev_print_fpga_intc_status(void);
25#define MICRODEV_FPGA_INTC_BASE 0xa6110000ul /* INTC base address on CPU-board FPGA */ 25#define MICRODEV_FPGA_INTC_BASE 0xa6110000ul /* INTC base address on CPU-board FPGA */
26#define MICRODEV_FPGA_INTENB_REG (MICRODEV_FPGA_INTC_BASE+0ul) /* Interrupt Enable Register on INTC on CPU-board FPGA */ 26#define MICRODEV_FPGA_INTENB_REG (MICRODEV_FPGA_INTC_BASE+0ul) /* Interrupt Enable Register on INTC on CPU-board FPGA */
27#define MICRODEV_FPGA_INTDSB_REG (MICRODEV_FPGA_INTC_BASE+8ul) /* Interrupt Disable Register on INTC on CPU-board FPGA */ 27#define MICRODEV_FPGA_INTDSB_REG (MICRODEV_FPGA_INTC_BASE+8ul) /* Interrupt Disable Register on INTC on CPU-board FPGA */
28#define MICRODEV_FPGA_INTC_MASK(n) (1ul<<(n)) /* Interupt mask to enable/disable INTC in CPU-board FPGA */ 28#define MICRODEV_FPGA_INTC_MASK(n) (1ul<<(n)) /* Interrupt mask to enable/disable INTC in CPU-board FPGA */
29#define MICRODEV_FPGA_INTPRI_REG(n) (MICRODEV_FPGA_INTC_BASE+0x10+((n)/8)*8)/* Interrupt Priority Register on INTC on CPU-board FPGA */ 29#define MICRODEV_FPGA_INTPRI_REG(n) (MICRODEV_FPGA_INTC_BASE+0x10+((n)/8)*8)/* Interrupt Priority Register on INTC on CPU-board FPGA */
30#define MICRODEV_FPGA_INTPRI_LEVEL(n,x) ((x)<<(((n)%8)*4)) /* MICRODEV_FPGA_INTPRI_LEVEL(int_number, int_level) */ 30#define MICRODEV_FPGA_INTPRI_LEVEL(n,x) ((x)<<(((n)%8)*4)) /* MICRODEV_FPGA_INTPRI_LEVEL(int_number, int_level) */
31#define MICRODEV_FPGA_INTPRI_MASK(n) (MICRODEV_FPGA_INTPRI_LEVEL((n),0xful)) /* Interrupt Priority Mask on INTC on CPU-board FPGA */ 31#define MICRODEV_FPGA_INTPRI_MASK(n) (MICRODEV_FPGA_INTPRI_LEVEL((n),0xful)) /* Interrupt Priority Mask on INTC on CPU-board FPGA */
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 199662bb35c6..fe58d00b250c 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -1,13 +1,13 @@
1/* 1/*
2 * Copyright (C) 1999 Niibe Yutaka 2 * Copyright (C) 1999 Niibe Yutaka
3 * Copyright (C) 2003 - 2006 Paul Mundt 3 * Copyright (C) 2003 - 2007 Paul Mundt
4 * 4 *
5 * ASID handling idea taken from MIPS implementation. 5 * ASID handling idea taken from MIPS implementation.
6 */ 6 */
7#ifndef __ASM_SH_MMU_CONTEXT_H 7#ifndef __ASM_SH_MMU_CONTEXT_H
8#define __ASM_SH_MMU_CONTEXT_H 8#define __ASM_SH_MMU_CONTEXT_H
9#ifdef __KERNEL__
10 9
10#ifdef __KERNEL__
11#include <asm/cpu/mmu_context.h> 11#include <asm/cpu/mmu_context.h>
12#include <asm/tlbflush.h> 12#include <asm/tlbflush.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
@@ -19,7 +19,6 @@
19 * (a) TLB cache version (or round, cycle whatever expression you like) 19 * (a) TLB cache version (or round, cycle whatever expression you like)
20 * (b) ASID (Address Space IDentifier) 20 * (b) ASID (Address Space IDentifier)
21 */ 21 */
22
23#define MMU_CONTEXT_ASID_MASK 0x000000ff 22#define MMU_CONTEXT_ASID_MASK 0x000000ff
24#define MMU_CONTEXT_VERSION_MASK 0xffffff00 23#define MMU_CONTEXT_VERSION_MASK 0xffffff00
25#define MMU_CONTEXT_FIRST_VERSION 0x00000100 24#define MMU_CONTEXT_FIRST_VERSION 0x00000100
@@ -28,10 +27,11 @@
28/* ASID is 8-bit value, so it can't be 0x100 */ 27/* ASID is 8-bit value, so it can't be 0x100 */
29#define MMU_NO_ASID 0x100 28#define MMU_NO_ASID 0x100
30 29
31#define cpu_context(cpu, mm) ((mm)->context.id[cpu])
32#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & \
33 MMU_CONTEXT_ASID_MASK)
34#define asid_cache(cpu) (cpu_data[cpu].asid_cache) 30#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
31#define cpu_context(cpu, mm) ((mm)->context.id[cpu])
32
33#define cpu_asid(cpu, mm) \
34 (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
35 35
36/* 36/*
37 * Virtual Page Number mask 37 * Virtual Page Number mask
@@ -39,6 +39,12 @@
39#define MMU_VPN_MASK 0xfffff000 39#define MMU_VPN_MASK 0xfffff000
40 40
41#ifdef CONFIG_MMU 41#ifdef CONFIG_MMU
42#if defined(CONFIG_SUPERH32)
43#include "mmu_context_32.h"
44#else
45#include "mmu_context_64.h"
46#endif
47
42/* 48/*
43 * Get MMU context if needed. 49 * Get MMU context if needed.
44 */ 50 */
@@ -59,6 +65,14 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
59 */ 65 */
60 flush_tlb_all(); 66 flush_tlb_all();
61 67
68#ifdef CONFIG_SUPERH64
69 /*
70 * The SH-5 cache uses the ASIDs, requiring both the I and D
71 * cache to be flushed when the ASID is exhausted. Weak.
72 */
73 flush_cache_all();
74#endif
75
62 /* 76 /*
63 * Fix version; Note that we avoid version #0 77 * Fix version; Note that we avoid version #0
 64 * to distinguish NO_CONTEXT. 78 * to distinguish NO_CONTEXT.
@@ -86,39 +100,6 @@ static inline int init_new_context(struct task_struct *tsk,
86} 100}
87 101
88/* 102/*
89 * Destroy context related info for an mm_struct that is about
90 * to be put to rest.
91 */
92static inline void destroy_context(struct mm_struct *mm)
93{
94 /* Do nothing */
95}
96
97static inline void set_asid(unsigned long asid)
98{
99 unsigned long __dummy;
100
101 __asm__ __volatile__ ("mov.l %2, %0\n\t"
102 "and %3, %0\n\t"
103 "or %1, %0\n\t"
104 "mov.l %0, %2"
105 : "=&r" (__dummy)
106 : "r" (asid), "m" (__m(MMU_PTEH)),
107 "r" (0xffffff00));
108}
109
110static inline unsigned long get_asid(void)
111{
112 unsigned long asid;
113
114 __asm__ __volatile__ ("mov.l %1, %0"
115 : "=r" (asid)
116 : "m" (__m(MMU_PTEH)));
117 asid &= MMU_CONTEXT_ASID_MASK;
118 return asid;
119}
120
121/*
122 * After we have set current->mm to a new value, this activates 103 * After we have set current->mm to a new value, this activates
123 * the context for the new mm so we see the new mappings. 104 * the context for the new mm so we see the new mappings.
124 */ 105 */
@@ -128,17 +109,6 @@ static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
128 set_asid(cpu_asid(cpu, mm)); 109 set_asid(cpu_asid(cpu, mm));
129} 110}
130 111
131/* MMU_TTB is used for optimizing the fault handling. */
132static inline void set_TTB(pgd_t *pgd)
133{
134 ctrl_outl((unsigned long)pgd, MMU_TTB);
135}
136
137static inline pgd_t *get_TTB(void)
138{
139 return (pgd_t *)ctrl_inl(MMU_TTB);
140}
141
142static inline void switch_mm(struct mm_struct *prev, 112static inline void switch_mm(struct mm_struct *prev,
143 struct mm_struct *next, 113 struct mm_struct *next,
144 struct task_struct *tsk) 114 struct task_struct *tsk)
@@ -153,17 +123,7 @@ static inline void switch_mm(struct mm_struct *prev,
153 if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) 123 if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
154 activate_context(next, cpu); 124 activate_context(next, cpu);
155} 125}
156 126#else
157#define deactivate_mm(tsk,mm) do { } while (0)
158
159#define activate_mm(prev, next) \
160 switch_mm((prev),(next),NULL)
161
162static inline void
163enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
164{
165}
166#else /* !CONFIG_MMU */
167#define get_mmu_context(mm) do { } while (0) 127#define get_mmu_context(mm) do { } while (0)
168#define init_new_context(tsk,mm) (0) 128#define init_new_context(tsk,mm) (0)
169#define destroy_context(mm) do { } while (0) 129#define destroy_context(mm) do { } while (0)
@@ -173,10 +133,11 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
173#define get_TTB() (0) 133#define get_TTB() (0)
174#define activate_context(mm,cpu) do { } while (0) 134#define activate_context(mm,cpu) do { } while (0)
175#define switch_mm(prev,next,tsk) do { } while (0) 135#define switch_mm(prev,next,tsk) do { } while (0)
136#endif /* CONFIG_MMU */
137
138#define activate_mm(prev, next) switch_mm((prev),(next),NULL)
176#define deactivate_mm(tsk,mm) do { } while (0) 139#define deactivate_mm(tsk,mm) do { } while (0)
177#define activate_mm(prev,next) do { } while (0)
178#define enter_lazy_tlb(mm,tsk) do { } while (0) 140#define enter_lazy_tlb(mm,tsk) do { } while (0)
179#endif /* CONFIG_MMU */
180 141
181#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) 142#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
182/* 143/*
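The context.id layout that mmu_context.h works with above is a 32-bit value whose low byte is the hardware ASID and whose upper bits are the generation ("version") that gets bumped each time the 256 ASIDs run out. A short worked example, where the starting value is made up and only the masks come from the header:

#include <stdio.h>

#define MMU_CONTEXT_ASID_MASK		0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK	0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION	0x00000100UL

int main(void)
{
	/* Pretend the per-cpu asid_cache is one allocation short of wrapping. */
	unsigned long asid_cache = MMU_CONTEXT_FIRST_VERSION + 0xfe;

	for (int i = 0; i < 2; i++) {
		unsigned long ctx = ++asid_cache;

		/* The second allocation wraps the 8-bit ASID field; this is
		 * the point at which get_mmu_context() flushes the TLB and
		 * moves on to the next version. */
		printf("context %#lx -> version %#lx, ASID %#lx\n", ctx,
		       ctx & MMU_CONTEXT_VERSION_MASK,
		       ctx & MMU_CONTEXT_ASID_MASK);
	}
	return 0;
}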
diff --git a/include/asm-sh/mmu_context_32.h b/include/asm-sh/mmu_context_32.h
new file mode 100644
index 000000000000..f4f9aebd68b7
--- /dev/null
+++ b/include/asm-sh/mmu_context_32.h
@@ -0,0 +1,47 @@
1#ifndef __ASM_SH_MMU_CONTEXT_32_H
2#define __ASM_SH_MMU_CONTEXT_32_H
3
4/*
5 * Destroy context related info for an mm_struct that is about
6 * to be put to rest.
7 */
8static inline void destroy_context(struct mm_struct *mm)
9{
10 /* Do nothing */
11}
12
13static inline void set_asid(unsigned long asid)
14{
15 unsigned long __dummy;
16
17 __asm__ __volatile__ ("mov.l %2, %0\n\t"
18 "and %3, %0\n\t"
19 "or %1, %0\n\t"
20 "mov.l %0, %2"
21 : "=&r" (__dummy)
22 : "r" (asid), "m" (__m(MMU_PTEH)),
23 "r" (0xffffff00));
24}
25
26static inline unsigned long get_asid(void)
27{
28 unsigned long asid;
29
30 __asm__ __volatile__ ("mov.l %1, %0"
31 : "=r" (asid)
32 : "m" (__m(MMU_PTEH)));
33 asid &= MMU_CONTEXT_ASID_MASK;
34 return asid;
35}
36
37/* MMU_TTB is used for optimizing the fault handling. */
38static inline void set_TTB(pgd_t *pgd)
39{
40 ctrl_outl((unsigned long)pgd, MMU_TTB);
41}
42
43static inline pgd_t *get_TTB(void)
44{
45 return (pgd_t *)ctrl_inl(MMU_TTB);
46}
47#endif /* __ASM_SH_MMU_CONTEXT_32_H */
diff --git a/include/asm-sh/mmu_context_64.h b/include/asm-sh/mmu_context_64.h
new file mode 100644
index 000000000000..020be744b088
--- /dev/null
+++ b/include/asm-sh/mmu_context_64.h
@@ -0,0 +1,75 @@
1#ifndef __ASM_SH_MMU_CONTEXT_64_H
2#define __ASM_SH_MMU_CONTEXT_64_H
3
4/*
5 * sh64-specific mmu_context interface.
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 * Copyright (C) 2003 - 2007 Paul Mundt
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <asm/cpu/registers.h>
15#include <asm/cacheflush.h>
16
17#define SR_ASID_MASK 0xffffffffff00ffffULL
18#define SR_ASID_SHIFT 16
19
20/*
21 * Destroy context related info for an mm_struct that is about
22 * to be put to rest.
23 */
24static inline void destroy_context(struct mm_struct *mm)
25{
26 /* Well, at least free TLB entries */
27 flush_tlb_mm(mm);
28}
29
30static inline unsigned long get_asid(void)
31{
32 unsigned long long sr;
33
34 asm volatile ("getcon " __SR ", %0\n\t"
35 : "=r" (sr));
36
37 sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
38 return (unsigned long) sr;
39}
40
41/* Set ASID into SR */
42static inline void set_asid(unsigned long asid)
43{
44 unsigned long long sr, pc;
45
46 asm volatile ("getcon " __SR ", %0" : "=r" (sr));
47
48 sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
49
50 /*
51 * It is possible that this function may be inlined and so to avoid
52 * the assembler reporting duplicate symbols we make use of the
53 * gas trick of generating symbols using numerics and forward
54 * reference.
55 */
56 asm volatile ("movi 1, %1\n\t"
57 "shlli %1, 28, %1\n\t"
58 "or %0, %1, %1\n\t"
59 "putcon %1, " __SR "\n\t"
60 "putcon %0, " __SSR "\n\t"
61 "movi 1f, %1\n\t"
62 "ori %1, 1 , %1\n\t"
63 "putcon %1, " __SPC "\n\t"
64 "rte\n"
65 "1:\n\t"
66 : "=r" (sr), "=r" (pc) : "0" (sr));
67}
68
69/* No spare register to twiddle, so use a software cache */
70extern pgd_t *mmu_pdtp_cache;
71
72#define set_TTB(pgd) (mmu_pdtp_cache = (pgd))
73#define get_TTB() (mmu_pdtp_cache)
74
75#endif /* __ASM_SH_MMU_CONTEXT_64_H */
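In the sh64 mmu_context above, get_asid()/set_asid() keep the ASID in SR bits 23:16, and SR_ASID_MASK clears just that field. The bit manipulation, minus the putcon/rte sequence and the numeric-label trick the comment explains, looks like this in plain C (the SR value is fabricated for illustration):

#include <stdio.h>

#define MMU_CONTEXT_ASID_MASK	0x000000ffULL
#define SR_ASID_MASK		0xffffffffff00ffffULL
#define SR_ASID_SHIFT		16

int main(void)
{
	unsigned long long sr = 0x0000000040000000ULL;	/* fabricated SR */
	unsigned long asid = 0x5a;

	/* set_asid(): clear the old field, shift the new ASID into place */
	sr = (sr & SR_ASID_MASK) | ((unsigned long long)asid << SR_ASID_SHIFT);

	/* get_asid(): shift the field back down and mask to 8 bits */
	printf("ASID read back: %#llx\n",
	       (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK);
	return 0;
}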
diff --git a/include/asm-sh/module.h b/include/asm-sh/module.h
index 118d5a2b228f..46eccd331660 100644
--- a/include/asm-sh/module.h
+++ b/include/asm-sh/module.h
@@ -20,6 +20,8 @@ struct mod_arch_specific {
20# define MODULE_PROC_FAMILY "SH3LE " 20# define MODULE_PROC_FAMILY "SH3LE "
21# elif defined CONFIG_CPU_SH4 21# elif defined CONFIG_CPU_SH4
22# define MODULE_PROC_FAMILY "SH4LE " 22# define MODULE_PROC_FAMILY "SH4LE "
23# elif defined CONFIG_CPU_SH5
24# define MODULE_PROC_FAMILY "SH5LE "
23# else 25# else
24# error unknown processor family 26# error unknown processor family
25# endif 27# endif
@@ -30,6 +32,8 @@ struct mod_arch_specific {
30# define MODULE_PROC_FAMILY "SH3BE " 32# define MODULE_PROC_FAMILY "SH3BE "
31# elif defined CONFIG_CPU_SH4 33# elif defined CONFIG_CPU_SH4
32# define MODULE_PROC_FAMILY "SH4BE " 34# define MODULE_PROC_FAMILY "SH4BE "
35# elif defined CONFIG_CPU_SH5
36# define MODULE_PROC_FAMILY "SH5BE "
33# else 37# else
34# error unknown processor family 38# error unknown processor family
35# endif 39# endif
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index d00a8fde7c7f..002e64a4f049 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -5,13 +5,7 @@
5 * Copyright (C) 1999 Niibe Yutaka 5 * Copyright (C) 1999 Niibe Yutaka
6 */ 6 */
7 7
8/* 8#include <linux/const.h>
9 [ P0/U0 (virtual) ] 0x00000000 <------ User space
10 [ P1 (fixed) cached ] 0x80000000 <------ Kernel space
11 [ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access
12 [ P3 (virtual) cached] 0xC0000000 <------ vmalloced area
13 [ P4 control ] 0xE0000000
14 */
15 9
16#ifdef __KERNEL__ 10#ifdef __KERNEL__
17 11
@@ -26,15 +20,13 @@
26# error "Bogus kernel page size?" 20# error "Bogus kernel page size?"
27#endif 21#endif
28 22
29#ifdef __ASSEMBLY__ 23#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
30#define PAGE_SIZE (1 << PAGE_SHIFT)
31#else
32#define PAGE_SIZE (1UL << PAGE_SHIFT)
33#endif
34
35#define PAGE_MASK (~(PAGE_SIZE-1)) 24#define PAGE_MASK (~(PAGE_SIZE-1))
36#define PTE_MASK PAGE_MASK 25#define PTE_MASK PAGE_MASK
37 26
27/* to align the pointer to the (next) page boundary */
28#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
29
38#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) 30#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
39#define HPAGE_SHIFT 16 31#define HPAGE_SHIFT 16
40#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) 32#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
@@ -45,6 +37,8 @@
45#define HPAGE_SHIFT 22 37#define HPAGE_SHIFT 22
46#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) 38#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
47#define HPAGE_SHIFT 26 39#define HPAGE_SHIFT 26
40#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
41#define HPAGE_SHIFT 29
48#endif 42#endif
49 43
50#ifdef CONFIG_HUGETLB_PAGE 44#ifdef CONFIG_HUGETLB_PAGE
@@ -55,20 +49,12 @@
55 49
56#ifndef __ASSEMBLY__ 50#ifndef __ASSEMBLY__
57 51
58extern void (*clear_page)(void *to);
59extern void (*copy_page)(void *to, void *from);
60
61extern unsigned long shm_align_mask; 52extern unsigned long shm_align_mask;
62extern unsigned long max_low_pfn, min_low_pfn; 53extern unsigned long max_low_pfn, min_low_pfn;
63extern unsigned long memory_start, memory_end; 54extern unsigned long memory_start, memory_end;
64 55
65#ifdef CONFIG_MMU 56extern void clear_page(void *to);
66extern void clear_page_slow(void *to); 57extern void copy_page(void *to, void *from);
67extern void copy_page_slow(void *to, void *from);
68#else
69extern void clear_page_nommu(void *to);
70extern void copy_page_nommu(void *to, void *from);
71#endif
72 58
73#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ 59#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
74 (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) 60 (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
@@ -96,12 +82,18 @@ typedef struct { unsigned long long pgd; } pgd_t;
96 ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) 82 ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
97#define __pte(x) \ 83#define __pte(x) \
98 ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) 84 ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
99#else 85#elif defined(CONFIG_SUPERH32)
100typedef struct { unsigned long pte_low; } pte_t; 86typedef struct { unsigned long pte_low; } pte_t;
101typedef struct { unsigned long pgprot; } pgprot_t; 87typedef struct { unsigned long pgprot; } pgprot_t;
102typedef struct { unsigned long pgd; } pgd_t; 88typedef struct { unsigned long pgd; } pgd_t;
103#define pte_val(x) ((x).pte_low) 89#define pte_val(x) ((x).pte_low)
104#define __pte(x) ((pte_t) { (x) } ) 90#define __pte(x) ((pte_t) { (x) } )
91#else
92typedef struct { unsigned long long pte_low; } pte_t;
93typedef struct { unsigned long pgprot; } pgprot_t;
94typedef struct { unsigned long pgd; } pgd_t;
95#define pte_val(x) ((x).pte_low)
96#define __pte(x) ((pte_t) { (x) } )
105#endif 97#endif
106 98
107#define pgd_val(x) ((x).pgd) 99#define pgd_val(x) ((x).pgd)
@@ -112,28 +104,44 @@ typedef struct { unsigned long pgd; } pgd_t;
112 104
113#endif /* !__ASSEMBLY__ */ 105#endif /* !__ASSEMBLY__ */
114 106
115/* to align the pointer to the (next) page boundary */
116#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
117
118/* 107/*
119 * IF YOU CHANGE THIS, PLEASE ALSO CHANGE 108 * __MEMORY_START and SIZE are the physical addresses and size of RAM.
120 *
121 * arch/sh/kernel/vmlinux.lds.S
122 *
123 * which has the same constant encoded..
124 */ 109 */
125
126#define __MEMORY_START CONFIG_MEMORY_START 110#define __MEMORY_START CONFIG_MEMORY_START
127#define __MEMORY_SIZE CONFIG_MEMORY_SIZE 111#define __MEMORY_SIZE CONFIG_MEMORY_SIZE
128 112
113/*
114 * PAGE_OFFSET is the virtual address of the start of kernel address
115 * space.
116 */
129#define PAGE_OFFSET CONFIG_PAGE_OFFSET 117#define PAGE_OFFSET CONFIG_PAGE_OFFSET
130#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
131#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
132#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
133 118
119/*
120 * Virtual to physical RAM address translation.
121 *
122 * In 29 bit mode, the physical offset of RAM from address 0 is visible in
123 * the kernel virtual address space, and thus we don't have to take
124 * this into account when translating. However in 32 bit mode this offset
125 * is not visible (it is part of the PMB mapping) and so needs to be
126 * added or subtracted as required.
127 */
128#ifdef CONFIG_32BIT
129#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
130#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
131#else
132#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
133#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
134#endif
135
136#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
134#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 137#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
135 138
136/* PFN start number, because of __MEMORY_START */ 139/*
140 * PFN = physical frame number (ie PFN 0 == physical address 0)
141 * PFN_START is the PFN of the first page of RAM. By defining this we
142 * don't have struct page entries for the portion of address space
143 * between physical address 0 and the start of RAM.
144 */
137#define PFN_START (__MEMORY_START >> PAGE_SHIFT) 145#define PFN_START (__MEMORY_START >> PAGE_SHIFT)
138#define ARCH_PFN_OFFSET (PFN_START) 146#define ARCH_PFN_OFFSET (PFN_START)
139#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 147#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
@@ -154,11 +162,21 @@ typedef struct { unsigned long pgd; } pgd_t;
154#endif 162#endif
155 163
156/* 164/*
157 * Slub defaults to 8-byte alignment, we're only interested in 4. 165 * Some drivers need to perform DMA into kmalloc'ed buffers
158 * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways. 166 * and so we have to increase the kmalloc minalign for this.
159 */ 167 */
160#define ARCH_KMALLOC_MINALIGN 4 168#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
161#define ARCH_SLAB_MINALIGN 4 169
170#ifdef CONFIG_SUPERH64
171/*
172 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
173 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 174 * alignment to avoid traps. The kmalloc alignment is guaranteed by
175 * virtue of L1_CACHE_BYTES, requiring this to only be special cased
176 * for slab caches.
177 */
178#define ARCH_SLAB_MINALIGN 8
179#endif
162 180
163#endif /* __KERNEL__ */ 181#endif /* __KERNEL__ */
164#endif /* __ASM_SH_PAGE_H */ 182#endif /* __ASM_SH_PAGE_H */
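The __pa()/__va() comment introduced in page.h above is easiest to see with numbers. The values below are illustrative only (PAGE_OFFSET 0x80000000, __MEMORY_START 0x0c000000, 4 KiB pages); the real ones come from the kernel configuration:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x80000000UL
#define __MEMORY_START	0x0c000000UL

int main(void)
{
	/* 29-bit mode: the P1 address 0x8c001000 already encodes the physical
	 * offset of RAM, so __pa() is a plain subtraction. */
	unsigned long kaddr29 = 0x8c001000UL;
	printf("29-bit __pa(%#lx) = %#lx\n", kaddr29, kaddr29 - PAGE_OFFSET);

	/* 32-bit (PMB) mode: PAGE_OFFSET maps the start of RAM, so the same
	 * page sits at 0x80001000 and __MEMORY_START must be added back. */
	unsigned long kaddr32 = 0x80001000UL;
	printf("32-bit __pa(%#lx) = %#lx\n", kaddr32,
	       kaddr32 - PAGE_OFFSET + __MEMORY_START);

	/* PFN_START: the first page frame of RAM, so mem_map need not cover
	 * physical addresses below __MEMORY_START. */
	printf("PFN_START = %#lx\n", __MEMORY_START >> PAGE_SHIFT);
	return 0;
}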
diff --git a/include/asm-sh/param.h b/include/asm-sh/param.h
index 1012296e07ab..ae245afdfd6a 100644
--- a/include/asm-sh/param.h
+++ b/include/asm-sh/param.h
@@ -2,11 +2,7 @@
2#define __ASM_SH_PARAM_H 2#define __ASM_SH_PARAM_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5# ifdef CONFIG_SH_WDT 5# define HZ CONFIG_HZ
6# define HZ 1000 /* Needed for high-res WOVF */
7# else
8# define HZ CONFIG_HZ
9# endif
10# define USER_HZ 100 /* User interfaces are in "ticks" */ 6# define USER_HZ 100 /* User interfaces are in "ticks" */
11# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */ 7# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
12#endif 8#endif
diff --git a/include/asm-sh/pci.h b/include/asm-sh/pci.h
index 2757ce096ff7..df1d383e18a5 100644
--- a/include/asm-sh/pci.h
+++ b/include/asm-sh/pci.h
@@ -38,9 +38,12 @@ extern struct pci_channel board_pci_channels[];
38#if defined(CONFIG_CPU_SUBTYPE_SH7780) || defined(CONFIG_CPU_SUBTYPE_SH7785) 38#if defined(CONFIG_CPU_SUBTYPE_SH7780) || defined(CONFIG_CPU_SUBTYPE_SH7785)
39#define PCI_IO_AREA 0xFE400000 39#define PCI_IO_AREA 0xFE400000
40#define PCI_IO_SIZE 0x00400000 40#define PCI_IO_SIZE 0x00400000
41#elif defined(CONFIG_CPU_SH5)
42extern unsigned long PCI_IO_AREA;
43#define PCI_IO_SIZE 0x00010000
41#else 44#else
42#define PCI_IO_AREA 0xFE240000 45#define PCI_IO_AREA 0xFE240000
43#define PCI_IO_SIZE 0X00040000 46#define PCI_IO_SIZE 0x00040000
44#endif 47#endif
45 48
46#define PCI_MEM_SIZE 0x01000000 49#define PCI_MEM_SIZE 0x01000000
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 8f1e8be8d15d..a4a8f8b93463 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -3,7 +3,7 @@
3 * use the SuperH page table tree. 3 * use the SuperH page table tree.
4 * 4 *
5 * Copyright (C) 1999 Niibe Yutaka 5 * Copyright (C) 1999 Niibe Yutaka
6 * Copyright (C) 2002 - 2005 Paul Mundt 6 * Copyright (C) 2002 - 2007 Paul Mundt
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General 8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file "COPYING" in the main directory of this 9 * Public License. See the file "COPYING" in the main directory of this
@@ -29,10 +29,27 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
29#endif /* !__ASSEMBLY__ */ 29#endif /* !__ASSEMBLY__ */
30 30
31/* 31/*
32 * Effective and physical address definitions, to aid with sign
33 * extension.
34 */
35#define NEFF 32
36#define NEFF_SIGN (1LL << (NEFF - 1))
37#define NEFF_MASK (-1LL << NEFF)
38
39#ifdef CONFIG_29BIT
40#define NPHYS 29
41#else
42#define NPHYS 32
43#endif
44
45#define NPHYS_SIGN (1LL << (NPHYS - 1))
46#define NPHYS_MASK (-1LL << NPHYS)
47
48/*
32 * traditional two-level paging structure 49 * traditional two-level paging structure
33 */ 50 */
34/* PTE bits */ 51/* PTE bits */
35#ifdef CONFIG_X2TLB 52#if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64)
36# define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ 53# define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */
37#else 54#else
38# define PTE_MAGNITUDE 2 /* 32-bit PTEs */ 55# define PTE_MAGNITUDE 2 /* 32-bit PTEs */
@@ -52,283 +69,27 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
52#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 69#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
53#define FIRST_USER_ADDRESS 0 70#define FIRST_USER_ADDRESS 0
54 71
55#define PTE_PHYS_MASK (0x20000000 - PAGE_SIZE) 72#ifdef CONFIG_32BIT
56 73#define PHYS_ADDR_MASK 0xffffffff
57#define VMALLOC_START (P3SEG)
58#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
59
60/*
61 * Linux PTEL encoding.
62 *
63 * Hardware and software bit definitions for the PTEL value (see below for
64 * notes on SH-X2 MMUs and 64-bit PTEs):
65 *
66 * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
67 *
68 * - Bit 1 is the SH-bit, but is unused on SH-3 due to an MMU bug (the
69 * hardware PTEL value can't have the SH-bit set when MMUCR.IX is set,
70 * which is the default in cpu-sh3/mmu_context.h:MMU_CONTROL_INIT).
71 *
72 * In order to keep this relatively clean, do not use these for defining
73 * SH-3 specific flags until all of the other unused bits have been
74 * exhausted.
75 *
76 * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE.
77 *
78 * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages.
79 * Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused.
80 *
81 * - Bits 31, 30, and 29 remain unused by everyone and can be used for future
82 * software flags, although care must be taken to update _PAGE_CLEAR_FLAGS.
83 *
84 * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
85 *
86 * SH-X2 MMUs and extended PTEs
87 *
88 * SH-X2 supports an extended mode TLB with split data arrays due to the
89 * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
90 * SZ bit placeholders still exist in data array 1, but are implemented as
91 * reserved bits, with the real logic existing in data array 2.
92 *
93 * The downside to this is that we can no longer fit everything in to a 32-bit
94 * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
95 * side, this gives us quite a few spare bits to play with for future usage.
96 */
97/* Legacy and compat mode bits */
98#define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */
99#define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */
100#define _PAGE_DIRTY 0x004 /* D-bit : page changed */
101#define _PAGE_CACHABLE 0x008 /* C-bit : cachable */
102#define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */
103#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
104#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/
105#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */
106#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
107#define _PAGE_PROTNONE 0x200 /* software: if not present */
108#define _PAGE_ACCESSED 0x400 /* software: page referenced */
109#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */
110
111#define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1)
112#define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER)
113
114/* Extended mode bits */
115#define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */
116#define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */
117#define _PAGE_EXT_ESZ2 0x0040 /* ESZ2-bit: Size of page */
118#define _PAGE_EXT_ESZ3 0x0080 /* ESZ3-bit: Size of page */
119
120#define _PAGE_EXT_USER_EXEC 0x0100 /* EPR0-bit: User space executable */
121#define _PAGE_EXT_USER_WRITE 0x0200 /* EPR1-bit: User space writable */
122#define _PAGE_EXT_USER_READ 0x0400 /* EPR2-bit: User space readable */
123
124#define _PAGE_EXT_KERN_EXEC 0x0800 /* EPR3-bit: Kernel space executable */
125#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */
126#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */
127
128/* Wrapper for extended mode pgprot twiddling */
129#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
130
131/* software: moves to PTEA.TC (Timing Control) */
132#define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */
133#define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */
134
135/* software: moves to PTEA.SA[2:0] (Space Attributes) */
136#define _PAGE_PCC_IODYN 0x00000001 /* IO space, dynamically sized bus */
137#define _PAGE_PCC_IO8 0x20000000 /* IO space, 8 bit bus */
138#define _PAGE_PCC_IO16 0x20000001 /* IO space, 16 bit bus */
139#define _PAGE_PCC_COM8 0x40000000 /* Common Memory space, 8 bit bus */
140#define _PAGE_PCC_COM16 0x40000001 /* Common Memory space, 16 bit bus */
141#define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */
142#define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */
143
144/* Mask which drops unused bits from the PTEL value */
145#if defined(CONFIG_CPU_SH3)
146#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \
147 _PAGE_FILE | _PAGE_SZ1 | \
148 _PAGE_HW_SHARED)
149#elif defined(CONFIG_X2TLB)
150/* Get rid of the legacy PR/SZ bits when using extended mode */
151#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \
152 _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
153#else 74#else
154#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) 75#define PHYS_ADDR_MASK 0x1fffffff
155#endif 76#endif
156 77
157#define _PAGE_FLAGS_HARDWARE_MASK (0x1fffffff & ~(_PAGE_CLEAR_FLAGS)) 78#define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK)
158 79
159/* Hardware flags, page size encoding */ 80#ifdef CONFIG_SUPERH32
160#if defined(CONFIG_X2TLB) 81#define VMALLOC_START (P3SEG)
161# if defined(CONFIG_PAGE_SIZE_4KB)
162# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ0)
163# elif defined(CONFIG_PAGE_SIZE_8KB)
164# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ1)
165# elif defined(CONFIG_PAGE_SIZE_64KB)
166# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ2)
167# endif
168#else 82#else
169# if defined(CONFIG_PAGE_SIZE_4KB) 83#define VMALLOC_START (0xf0000000)
170# define _PAGE_FLAGS_HARD _PAGE_SZ0
171# elif defined(CONFIG_PAGE_SIZE_64KB)
172# define _PAGE_FLAGS_HARD _PAGE_SZ1
173# endif
174#endif 84#endif
85#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
175 86
176#if defined(CONFIG_X2TLB) 87#if defined(CONFIG_SUPERH32)
177# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) 88#include <asm/pgtable_32.h>
178# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2)
179# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
180# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
181# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
182# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
183# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
184# define _PAGE_SZHUGE (_PAGE_EXT_ESZ3)
185# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
186# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
187# endif
188#else 89#else
189# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) 90#include <asm/pgtable_64.h>
190# define _PAGE_SZHUGE (_PAGE_SZ1)
191# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
192# define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1)
193# endif
194#endif
195
196/*
197 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
198 * to make pte_mkhuge() happy.
199 */
200#ifndef _PAGE_SZHUGE
201# define _PAGE_SZHUGE (_PAGE_FLAGS_HARD)
202#endif
203
204#define _PAGE_CHG_MASK \
205 (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
206
207#ifndef __ASSEMBLY__
208
209#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
210#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
211 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
212
213#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
214 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
215 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
216 _PAGE_EXT_KERN_WRITE | \
217 _PAGE_EXT_USER_READ | \
218 _PAGE_EXT_USER_WRITE))
219
220#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
221 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
222 _PAGE_EXT(_PAGE_EXT_KERN_EXEC | \
223 _PAGE_EXT_KERN_READ | \
224 _PAGE_EXT_USER_EXEC | \
225 _PAGE_EXT_USER_READ))
226
227#define PAGE_COPY PAGE_EXECREAD
228
229#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
230 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
231 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
232 _PAGE_EXT_USER_READ))
233
234#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
235 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
236 _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
237 _PAGE_EXT_USER_WRITE))
238
239#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
240 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
241 _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
242 _PAGE_EXT_KERN_READ | \
243 _PAGE_EXT_KERN_EXEC | \
244 _PAGE_EXT_USER_WRITE | \
245 _PAGE_EXT_USER_READ | \
246 _PAGE_EXT_USER_EXEC))
247
248#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
249 _PAGE_DIRTY | _PAGE_ACCESSED | \
250 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
251 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
252 _PAGE_EXT_KERN_WRITE | \
253 _PAGE_EXT_KERN_EXEC))
254
255#define PAGE_KERNEL_NOCACHE \
256 __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
257 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
258 _PAGE_FLAGS_HARD | \
259 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
260 _PAGE_EXT_KERN_WRITE | \
261 _PAGE_EXT_KERN_EXEC))
262
263#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
264 _PAGE_DIRTY | _PAGE_ACCESSED | \
265 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
266 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
267 _PAGE_EXT_KERN_EXEC))
268
269#define PAGE_KERNEL_PCC(slot, type) \
270 __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
271 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
272 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
273 _PAGE_EXT_KERN_WRITE | \
274 _PAGE_EXT_KERN_EXEC) \
275 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
276 (type))
277
278#elif defined(CONFIG_MMU) /* SH-X TLB */
279#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
280 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
281
282#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
283 _PAGE_CACHABLE | _PAGE_ACCESSED | \
284 _PAGE_FLAGS_HARD)
285
286#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
287 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
288
289#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
290 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
291
292#define PAGE_EXECREAD PAGE_READONLY
293#define PAGE_RWX PAGE_SHARED
294#define PAGE_WRITEONLY PAGE_SHARED
295
296#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
297 _PAGE_DIRTY | _PAGE_ACCESSED | \
298 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
299
300#define PAGE_KERNEL_NOCACHE \
301 __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
302 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
303 _PAGE_FLAGS_HARD)
304
305#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
306 _PAGE_DIRTY | _PAGE_ACCESSED | \
307 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
308
309#define PAGE_KERNEL_PCC(slot, type) \
310 __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
311 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
312 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
313 (type))
314#else /* no mmu */
315#define PAGE_NONE __pgprot(0)
316#define PAGE_SHARED __pgprot(0)
317#define PAGE_COPY __pgprot(0)
318#define PAGE_EXECREAD __pgprot(0)
319#define PAGE_RWX __pgprot(0)
320#define PAGE_READONLY __pgprot(0)
321#define PAGE_WRITEONLY __pgprot(0)
322#define PAGE_KERNEL __pgprot(0)
323#define PAGE_KERNEL_NOCACHE __pgprot(0)
324#define PAGE_KERNEL_RO __pgprot(0)
325
326#define PAGE_KERNEL_PCC(slot, type) \
327 __pgprot(0)
328#endif 91#endif
329 92
330#endif /* __ASSEMBLY__ */
331
332/* 93/*
333 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page 94 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 334 * protection for execute, and consider it the same as a read. Also, write 95 * protection for execute, and consider it the same as a read. Also, write
@@ -357,208 +118,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
357#define __S110 PAGE_RWX 118#define __S110 PAGE_RWX
358#define __S111 PAGE_RWX 119#define __S111 PAGE_RWX
359 120
360#ifndef __ASSEMBLY__
361
362/*
363 * Certain architectures need to do special things when PTEs
364 * within a page table are directly modified. Thus, the following
365 * hook is made available.
366 */
367#ifdef CONFIG_X2TLB
368static inline void set_pte(pte_t *ptep, pte_t pte)
369{
370 ptep->pte_high = pte.pte_high;
371 smp_wmb();
372 ptep->pte_low = pte.pte_low;
373}
374#else
375#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
376#endif
377
378#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
379
380/*
381 * (pmds are folded into pgds so this doesn't get actually called,
382 * but the define is needed for a generic inline function.)
383 */
384#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
385
386#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
387
388#define pfn_pte(pfn, prot) \
389 __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
390#define pfn_pmd(pfn, prot) \
391 __pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
392
393#define pte_none(x) (!pte_val(x))
394#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
395
396#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
397
398#define pmd_none(x) (!pmd_val(x))
399#define pmd_present(x) (pmd_val(x))
400#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
401#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)
402
403#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
404#define pte_page(x) pfn_to_page(pte_pfn(x))
405
406/*
407 * The following only work if pte_present() is true.
408 * Undefined behaviour if not..
409 */
410#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT))
411#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
412#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
413#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
414
415#ifdef CONFIG_X2TLB
416#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
417#else
418#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
419#endif
420
421#define PTE_BIT_FUNC(h,fn,op) \
422static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
423
424#ifdef CONFIG_X2TLB
425/*
426 * We cheat a bit in the SH-X2 TLB case. As the permission bits are
427 * individually toggled (and user permissions are entirely decoupled from
428 * kernel permissions), we attempt to couple them a bit more sanely here.
429 */
430PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
431PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
432PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
433#else
434PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
435PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
436PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
437#endif
438
439PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
440PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
441PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
442PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
443
444/*
445 * Macro and implementation to make a page protection as uncachable.
446 */
447#define pgprot_writecombine(prot) \
448 __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
449
450#define pgprot_noncached pgprot_writecombine
451
452/*
453 * Conversion functions: convert a page and protection to a page entry,
454 * and a page entry and page directory to the page they refer to.
455 *
456 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
457 */
458#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
459
460static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
461{
462 pte.pte_low &= _PAGE_CHG_MASK;
463 pte.pte_low |= pgprot_val(newprot);
464
465#ifdef CONFIG_X2TLB
466 pte.pte_high |= pgprot_val(newprot) >> 32;
467#endif
468
469 return pte;
470}
471
472#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
473#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
474
475/* to find an entry in a page-table-directory. */
476#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
477#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
478
479/* to find an entry in a kernel page-table-directory */
480#define pgd_offset_k(address) pgd_offset(&init_mm, address)
481
482/* Find an entry in the third-level page table.. */
483#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
484#define pte_offset_kernel(dir, address) \
485 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
486#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
487#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
488
489#define pte_unmap(pte) do { } while (0)
490#define pte_unmap_nested(pte) do { } while (0)
491
492#ifdef CONFIG_X2TLB
493#define pte_ERROR(e) \
494 printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
495 &(e), (e).pte_high, (e).pte_low)
496#define pgd_ERROR(e) \
497 printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
498#else
499#define pte_ERROR(e) \
500 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
501#define pgd_ERROR(e) \
502 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
503#endif
504
505struct vm_area_struct;
506extern void update_mmu_cache(struct vm_area_struct * vma,
507 unsigned long address, pte_t pte);
508
509/*
510 * Encode and de-code a swap entry
511 *
512 * Constraints:
513 * _PAGE_FILE at bit 0
514 * _PAGE_PRESENT at bit 8
515 * _PAGE_PROTNONE at bit 9
516 *
517 * For the normal case, we encode the swap type into bits 0:7 and the
518 * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
519 * preserved bits in the low 32-bits and use the upper 32 as the swap
520 * offset (along with a 5-bit type), following the same approach as x86
521 * PAE. This keeps the logic quite simple, and allows for a full 32
522 * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
523 * in the pte_low case.
524 *
525 * As is evident by the Alpha code, if we ever get a 64-bit unsigned
526 * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
527 * much cleaner..
528 *
529 * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
530 * and _PAGE_PROTNONE bits
531 */
532#ifdef CONFIG_X2TLB
533#define __swp_type(x) ((x).val & 0x1f)
534#define __swp_offset(x) ((x).val >> 5)
535#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5})
536#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
537#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
538
539/*
540 * Encode and decode a nonlinear file mapping entry
541 */
542#define pte_to_pgoff(pte) ((pte).pte_high)
543#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
544
545#define PTE_FILE_MAX_BITS 32
546#else
547#define __swp_type(x) ((x).val & 0xff)
548#define __swp_offset(x) ((x).val >> 10)
549#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10})
550
551#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
552#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
553
554/*
555 * Encode and decode a nonlinear file mapping entry
556 */
557#define PTE_FILE_MAX_BITS 29
558#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
559#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
560#endif
561
562typedef pte_t *pte_addr_t; 121typedef pte_t *pte_addr_t;
563 122
564#define kern_addr_valid(addr) (1) 123#define kern_addr_valid(addr) (1)
@@ -566,27 +125,28 @@ typedef pte_t *pte_addr_t;
566#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 125#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
567 remap_pfn_range(vma, vaddr, pfn, size, prot) 126 remap_pfn_range(vma, vaddr, pfn, size, prot)
568 127
569struct mm_struct; 128#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
570 129
571/* 130/*
572 * No page table caches to initialise 131 * No page table caches to initialise
573 */ 132 */
574#define pgtable_cache_init() do { } while (0) 133#define pgtable_cache_init() do { } while (0)
575 134
576#ifndef CONFIG_MMU
577extern unsigned int kobjsize(const void *objp);
578#endif /* !CONFIG_MMU */
579
580#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ 135#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
581 defined(CONFIG_SH7705_CACHE_32KB)) 136 defined(CONFIG_SH7705_CACHE_32KB))
137struct mm_struct;
582#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 138#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
583extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 139pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
584#endif 140#endif
585 141
142struct vm_area_struct;
143extern void update_mmu_cache(struct vm_area_struct * vma,
144 unsigned long address, pte_t pte);
586extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 145extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
587extern void paging_init(void); 146extern void paging_init(void);
147extern void page_table_range_init(unsigned long start, unsigned long end,
148 pgd_t *pgd);
588 149
589#include <asm-generic/pgtable.h> 150#include <asm-generic/pgtable.h>
590 151
591#endif /* !__ASSEMBLY__ */ 152#endif /* __ASM_SH_PGTABLE_H */
592#endif /* __ASM_SH_PAGE_H */
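The NEFF/NPHYS constants introduced in pgtable.h above exist so that 32-bit effective and physical addresses can be sign-extended when carried around in 64-bit arithmetic (mainly on the sh64 side). A small worked example reusing the header's own expressions:

#include <stdio.h>

#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

int main(void)
{
	long long addr = 0xc0000000LL;	/* P3/vmalloc-style effective address */

	if (addr & NEFF_SIGN)		/* bit 31 set: extend into the top half */
		addr |= NEFF_MASK;

	printf("sign-extended: %#llx\n", (unsigned long long)addr);
	return 0;
}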
diff --git a/include/asm-sh/pgtable_32.h b/include/asm-sh/pgtable_32.h
new file mode 100644
index 000000000000..3e3557c53c55
--- /dev/null
+++ b/include/asm-sh/pgtable_32.h
@@ -0,0 +1,474 @@
1#ifndef __ASM_SH_PGTABLE_32_H
2#define __ASM_SH_PGTABLE_32_H
3
4/*
5 * Linux PTEL encoding.
6 *
7 * Hardware and software bit definitions for the PTEL value (see below for
8 * notes on SH-X2 MMUs and 64-bit PTEs):
9 *
10 * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
11 *
12 * - Bit 1 is the SH-bit, but is unused on SH-3 due to an MMU bug (the
13 * hardware PTEL value can't have the SH-bit set when MMUCR.IX is set,
14 * which is the default in cpu-sh3/mmu_context.h:MMU_CONTROL_INIT).
15 *
16 * In order to keep this relatively clean, do not use these for defining
17 * SH-3 specific flags until all of the other unused bits have been
18 * exhausted.
19 *
20 * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE.
21 *
22 * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages.
23 * Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused.
24 *
25 * - On 29 bit platforms, bits 31 to 29 are used for the space attributes
26 * and timing control which (together with bit 0) are moved into the
27 * old-style PTEA on the parts that support it.
28 *
29 * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
30 *
31 * SH-X2 MMUs and extended PTEs
32 *
33 * SH-X2 supports an extended mode TLB with split data arrays due to the
34 * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
35 * SZ bit placeholders still exist in data array 1, but are implemented as
36 * reserved bits, with the real logic existing in data array 2.
37 *
38 * The downside to this is that we can no longer fit everything in to a 32-bit
39 * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
40 * side, this gives us quite a few spare bits to play with for future usage.
41 */
42/* Legacy and compat mode bits */
43#define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */
44#define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */
45#define _PAGE_DIRTY 0x004 /* D-bit : page changed */
46#define _PAGE_CACHABLE 0x008 /* C-bit : cachable */
47#define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */
48#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
49#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/
50#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */
51#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
52#define _PAGE_PROTNONE 0x200 /* software: if not present */
53#define _PAGE_ACCESSED 0x400 /* software: page referenced */
54#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */
55
56#define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1)
57#define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER)
58
59/* Extended mode bits */
60#define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */
61#define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */
62#define _PAGE_EXT_ESZ2 0x0040 /* ESZ2-bit: Size of page */
63#define _PAGE_EXT_ESZ3 0x0080 /* ESZ3-bit: Size of page */
64
65#define _PAGE_EXT_USER_EXEC 0x0100 /* EPR0-bit: User space executable */
66#define _PAGE_EXT_USER_WRITE 0x0200 /* EPR1-bit: User space writable */
67#define _PAGE_EXT_USER_READ 0x0400 /* EPR2-bit: User space readable */
68
69#define _PAGE_EXT_KERN_EXEC 0x0800 /* EPR3-bit: Kernel space executable */
70#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */
71#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */
72
73/* Wrapper for extended mode pgprot twiddling */
74#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
75
76/* software: moves to PTEA.TC (Timing Control) */
77#define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */
78#define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */
79
80/* software: moves to PTEA.SA[2:0] (Space Attributes) */
81#define _PAGE_PCC_IODYN 0x00000001 /* IO space, dynamically sized bus */
82#define _PAGE_PCC_IO8 0x20000000 /* IO space, 8 bit bus */
83#define _PAGE_PCC_IO16 0x20000001 /* IO space, 16 bit bus */
84#define _PAGE_PCC_COM8 0x40000000 /* Common Memory space, 8 bit bus */
85#define _PAGE_PCC_COM16 0x40000001 /* Common Memory space, 16 bit bus */
86#define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */
 87#define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 16 bit bus */
88
89/* Mask which drops unused bits from the PTEL value */
90#if defined(CONFIG_CPU_SH3)
91#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \
92 _PAGE_FILE | _PAGE_SZ1 | \
93 _PAGE_HW_SHARED)
94#elif defined(CONFIG_X2TLB)
95/* Get rid of the legacy PR/SZ bits when using extended mode */
96#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \
97 _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
98#else
99#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
100#endif
101
102#define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS))
103
104/* Hardware flags, page size encoding */
105#if defined(CONFIG_X2TLB)
106# if defined(CONFIG_PAGE_SIZE_4KB)
107# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ0)
108# elif defined(CONFIG_PAGE_SIZE_8KB)
109# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ1)
110# elif defined(CONFIG_PAGE_SIZE_64KB)
111# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ2)
112# endif
113#else
114# if defined(CONFIG_PAGE_SIZE_4KB)
115# define _PAGE_FLAGS_HARD _PAGE_SZ0
116# elif defined(CONFIG_PAGE_SIZE_64KB)
117# define _PAGE_FLAGS_HARD _PAGE_SZ1
118# endif
119#endif
120
121#if defined(CONFIG_X2TLB)
122# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
123# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2)
124# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
125# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
126# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
127# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
128# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
129# define _PAGE_SZHUGE (_PAGE_EXT_ESZ3)
130# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
131# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
132# endif
133#else
134# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
135# define _PAGE_SZHUGE (_PAGE_SZ1)
136# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
137# define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1)
138# endif
139#endif
140
141/*
142 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
143 * to make pte_mkhuge() happy.
144 */
145#ifndef _PAGE_SZHUGE
146# define _PAGE_SZHUGE (_PAGE_FLAGS_HARD)
147#endif
148
149#define _PAGE_CHG_MASK \
150 (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
151
152#ifndef __ASSEMBLY__
153
154#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
155#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
156 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
157
158#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
159 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
160 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
161 _PAGE_EXT_KERN_WRITE | \
162 _PAGE_EXT_USER_READ | \
163 _PAGE_EXT_USER_WRITE))
164
165#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
166 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
167 _PAGE_EXT(_PAGE_EXT_KERN_EXEC | \
168 _PAGE_EXT_KERN_READ | \
169 _PAGE_EXT_USER_EXEC | \
170 _PAGE_EXT_USER_READ))
171
172#define PAGE_COPY PAGE_EXECREAD
173
174#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
175 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
176 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
177 _PAGE_EXT_USER_READ))
178
179#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
180 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
181 _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
182 _PAGE_EXT_USER_WRITE))
183
184#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
185 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
186 _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
187 _PAGE_EXT_KERN_READ | \
188 _PAGE_EXT_KERN_EXEC | \
189 _PAGE_EXT_USER_WRITE | \
190 _PAGE_EXT_USER_READ | \
191 _PAGE_EXT_USER_EXEC))
192
193#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
194 _PAGE_DIRTY | _PAGE_ACCESSED | \
195 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
196 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
197 _PAGE_EXT_KERN_WRITE | \
198 _PAGE_EXT_KERN_EXEC))
199
200#define PAGE_KERNEL_NOCACHE \
201 __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
202 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
203 _PAGE_FLAGS_HARD | \
204 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
205 _PAGE_EXT_KERN_WRITE | \
206 _PAGE_EXT_KERN_EXEC))
207
208#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
209 _PAGE_DIRTY | _PAGE_ACCESSED | \
210 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
211 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
212 _PAGE_EXT_KERN_EXEC))
213
214#define PAGE_KERNEL_PCC(slot, type) \
215 __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
216 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
217 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
218 _PAGE_EXT_KERN_WRITE | \
219 _PAGE_EXT_KERN_EXEC) \
220 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
221 (type))
222
223#elif defined(CONFIG_MMU) /* SH-X TLB */
224#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
225 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
226
227#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
228 _PAGE_CACHABLE | _PAGE_ACCESSED | \
229 _PAGE_FLAGS_HARD)
230
231#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
232 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
233
234#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
235 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
236
237#define PAGE_EXECREAD PAGE_READONLY
238#define PAGE_RWX PAGE_SHARED
239#define PAGE_WRITEONLY PAGE_SHARED
240
241#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
242 _PAGE_DIRTY | _PAGE_ACCESSED | \
243 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
244
245#define PAGE_KERNEL_NOCACHE \
246 __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
247 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
248 _PAGE_FLAGS_HARD)
249
250#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
251 _PAGE_DIRTY | _PAGE_ACCESSED | \
252 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
253
254#define PAGE_KERNEL_PCC(slot, type) \
255 __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
256 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
257 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
258 (type))
259#else /* no mmu */
260#define PAGE_NONE __pgprot(0)
261#define PAGE_SHARED __pgprot(0)
262#define PAGE_COPY __pgprot(0)
263#define PAGE_EXECREAD __pgprot(0)
264#define PAGE_RWX __pgprot(0)
265#define PAGE_READONLY __pgprot(0)
266#define PAGE_WRITEONLY __pgprot(0)
267#define PAGE_KERNEL __pgprot(0)
268#define PAGE_KERNEL_NOCACHE __pgprot(0)
269#define PAGE_KERNEL_RO __pgprot(0)
270
271#define PAGE_KERNEL_PCC(slot, type) \
272 __pgprot(0)
273#endif
274
275#endif /* __ASSEMBLY__ */
276
277#ifndef __ASSEMBLY__
278
279/*
280 * Certain architectures need to do special things when PTEs
281 * within a page table are directly modified. Thus, the following
282 * hook is made available.
283 */
284#ifdef CONFIG_X2TLB
285static inline void set_pte(pte_t *ptep, pte_t pte)
286{
287 ptep->pte_high = pte.pte_high;
288 smp_wmb();
289 ptep->pte_low = pte.pte_low;
290}
291#else
292#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
293#endif
294
295#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
296
297/*
298 * (pmds are folded into pgds so this doesn't get actually called,
299 * but the define is needed for a generic inline function.)
300 */
301#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
302
303#define pfn_pte(pfn, prot) \
304 __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
305#define pfn_pmd(pfn, prot) \
306 __pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
307
308#define pte_none(x) (!pte_val(x))
309#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
310
311#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
312
313#define pmd_none(x) (!pmd_val(x))
314#define pmd_present(x) (pmd_val(x))
315#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
316#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)
317
318#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
319#define pte_page(x) pfn_to_page(pte_pfn(x))
320
321/*
322 * The following only work if pte_present() is true.
323 * Undefined behaviour if not..
324 */
325#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT))
326#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
327#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
328#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
329
330#ifdef CONFIG_X2TLB
331#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
332#else
333#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
334#endif
335
336#define PTE_BIT_FUNC(h,fn,op) \
337static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
338
339#ifdef CONFIG_X2TLB
340/*
341 * We cheat a bit in the SH-X2 TLB case. As the permission bits are
342 * individually toggled (and user permissions are entirely decoupled from
343 * kernel permissions), we attempt to couple them a bit more sanely here.
344 */
345PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
346PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
347PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
348#else
349PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
350PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
351PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
352#endif
353
354PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
355PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
356PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
357PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
358
359/*
 360 * Macro and implementation to make a page protection uncachable.
361 */
362#define pgprot_writecombine(prot) \
363 __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
364
365#define pgprot_noncached pgprot_writecombine
366
367/*
368 * Conversion functions: convert a page and protection to a page entry,
369 * and a page entry and page directory to the page they refer to.
370 *
371 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
372 */
373#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
374
375static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
376{
377 pte.pte_low &= _PAGE_CHG_MASK;
378 pte.pte_low |= pgprot_val(newprot);
379
380#ifdef CONFIG_X2TLB
381 pte.pte_high |= pgprot_val(newprot) >> 32;
382#endif
383
384 return pte;
385}
386
387#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
388#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
389
390/* to find an entry in a page-table-directory. */
391#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
392#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
393
394/* to find an entry in a kernel page-table-directory */
395#define pgd_offset_k(address) pgd_offset(&init_mm, address)
396
397/* Find an entry in the third-level page table.. */
398#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
399#define pte_offset_kernel(dir, address) \
400 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
401#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
402#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
403
404#define pte_unmap(pte) do { } while (0)
405#define pte_unmap_nested(pte) do { } while (0)
406
407#ifdef CONFIG_X2TLB
408#define pte_ERROR(e) \
409 printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
410 &(e), (e).pte_high, (e).pte_low)
411#define pgd_ERROR(e) \
412 printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
413#else
414#define pte_ERROR(e) \
415 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
416#define pgd_ERROR(e) \
417 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
418#endif
419
420/*
421 * Encode and de-code a swap entry
422 *
423 * Constraints:
424 * _PAGE_FILE at bit 0
425 * _PAGE_PRESENT at bit 8
426 * _PAGE_PROTNONE at bit 9
427 *
428 * For the normal case, we encode the swap type into bits 0:7 and the
429 * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
430 * preserved bits in the low 32-bits and use the upper 32 as the swap
431 * offset (along with a 5-bit type), following the same approach as x86
432 * PAE. This keeps the logic quite simple, and allows for a full 32
433 * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
434 * in the pte_low case.
435 *
436 * As is evident by the Alpha code, if we ever get a 64-bit unsigned
437 * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
438 * much cleaner..
439 *
440 * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
441 * and _PAGE_PROTNONE bits
442 */
443#ifdef CONFIG_X2TLB
444#define __swp_type(x) ((x).val & 0x1f)
445#define __swp_offset(x) ((x).val >> 5)
446#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5})
447#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
448#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
449
450/*
451 * Encode and decode a nonlinear file mapping entry
452 */
453#define pte_to_pgoff(pte) ((pte).pte_high)
454#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
455
456#define PTE_FILE_MAX_BITS 32
457#else
458#define __swp_type(x) ((x).val & 0xff)
459#define __swp_offset(x) ((x).val >> 10)
460#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10})
461
462#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
463#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
464
465/*
466 * Encode and decode a nonlinear file mapping entry
467 */
468#define PTE_FILE_MAX_BITS 29
469#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
470#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
471#endif
472
473#endif /* __ASSEMBLY__ */
474#endif /* __ASM_SH_PGTABLE_32_H */
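
[Editorial note, not part of the patch: a minimal user-space sketch of the non-X2TLB swap-entry
encoding defined near the end of pgtable_32.h above. The swp_entry_t value carries the type in
bits 0:7 and the offset from bit 10 up, and the whole value is shifted left by one when stored in
the PTE so that _PAGE_FILE (bit 0) stays clear. The struct and macro names mirror the header; the
concrete type/offset values are hypothetical.]

    #include <assert.h>

    typedef struct { unsigned long val; } swp_entry_t;     /* stand-in for the kernel type */

    #define __swp_type(x)           ((x).val & 0xff)
    #define __swp_offset(x)         ((x).val >> 10)
    #define __swp_entry(type, off)  ((swp_entry_t){ (type) | (off) << 10 })

    int main(void)
    {
            swp_entry_t e = __swp_entry(3, 0x1234);         /* hypothetical type/offset    */
            unsigned long pte_val = e.val << 1;             /* as in __swp_entry_to_pte()  */
            swp_entry_t back = { pte_val >> 1 };            /* as in __pte_to_swp_entry()  */

            assert(__swp_type(back) == 3);                  /* type round-trips            */
            assert(__swp_offset(back) == 0x1234);           /* offset round-trips          */
            assert((pte_val & 1) == 0);                     /* _PAGE_FILE bit stays clear  */
            return 0;
    }
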
diff --git a/include/asm-sh/pgtable_64.h b/include/asm-sh/pgtable_64.h
new file mode 100644
index 000000000000..972211671c9a
--- /dev/null
+++ b/include/asm-sh/pgtable_64.h
@@ -0,0 +1,299 @@
1#ifndef __ASM_SH_PGTABLE_64_H
2#define __ASM_SH_PGTABLE_64_H
3
4/*
5 * include/asm-sh/pgtable_64.h
6 *
7 * This file contains the functions and defines necessary to modify and use
8 * the SuperH page table tree.
9 *
10 * Copyright (C) 2000, 2001 Paolo Alberelli
11 * Copyright (C) 2003, 2004 Paul Mundt
12 * Copyright (C) 2003, 2004 Richard Curnow
13 *
14 * This file is subject to the terms and conditions of the GNU General Public
15 * License. See the file "COPYING" in the main directory of this archive
16 * for more details.
17 */
18#include <linux/threads.h>
19#include <asm/processor.h>
20#include <asm/page.h>
21
22/*
23 * Error outputs.
24 */
25#define pte_ERROR(e) \
26 printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
27#define pgd_ERROR(e) \
28 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
29
30/*
31 * Table setting routines. Used within arch/mm only.
32 */
33#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
34
35static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
36{
37 unsigned long long x = ((unsigned long long) pteval.pte_low);
38 unsigned long long *xp = (unsigned long long *) pteptr;
39 /*
40 * Sign-extend based on NPHYS.
41 */
42 *(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
43}
44#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
45
46static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
47{
48 pmd_val(*pmdp) = (unsigned long) ptep;
49}
50
51/*
52 * PGD defines. Top level.
53 */
54
55/* To find an entry in a generic PGD. */
56#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
57#define __pgd_offset(address) pgd_index(address)
58#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
59
60/* To find an entry in a kernel PGD. */
61#define pgd_offset_k(address) pgd_offset(&init_mm, address)
62
63/*
64 * PMD level access routines. Same notes as above.
65 */
66#define _PMD_EMPTY 0x0
67/* Either the PMD is empty or present, it's not paged out */
68#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
69#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
70#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
71#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
72
73#define pmd_page_vaddr(pmd_entry) \
74 ((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
75
76#define pmd_page(pmd) \
77 (virt_to_page(pmd_val(pmd)))
78
79/* PMD to PTE dereferencing */
80#define pte_index(address) \
81 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
82
83#define pte_offset_kernel(dir, addr) \
84 ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
85
86#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
87#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)
88#define pte_unmap(pte) do { } while (0)
89#define pte_unmap_nested(pte) do { } while (0)
90
91#ifndef __ASSEMBLY__
92#define IOBASE_VADDR 0xff000000
93#define IOBASE_END 0xffffffff
94
95/*
96 * PTEL coherent flags.
97 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
98 */
99/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
100 positions, to avoid expensive bit shuffling on every refill. The remaining
101 bits are used for s/w purposes and masked out on each refill.
102
103 Note, the PTE slots are used to hold data of type swp_entry_t when a page is
104 swapped out. Only the _PAGE_PRESENT flag is significant when the page is
105 swapped out, and it must be placed so that it doesn't overlap either the
106 type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
107 at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
108 scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
109 [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
110 into 2 pieces. That is handled by SWP_ENTRY and SWP_TYPE below. */
111#define _PAGE_WT 0x001 /* CB0: if cacheable, 1->write-thru, 0->write-back */
112#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
113#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
114#define _PAGE_PRESENT 0x004 /* software: page referenced */
115#define _PAGE_FILE 0x004 /* software: only when !present */
116#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
117#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
118#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
119#define _PAGE_READ 0x040 /* PR0-bit : read access allowed */
120#define _PAGE_EXECUTE 0x080 /* PR1-bit : execute access allowed */
121#define _PAGE_WRITE 0x100 /* PR2-bit : write access allowed */
122#define _PAGE_USER 0x200 /* PR3-bit : user space access allowed */
123#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
124#define _PAGE_ACCESSED 0x800 /* software: page referenced */
125
126/* Mask which drops software flags */
127#define _PAGE_FLAGS_HARDWARE_MASK 0xfffffffffffff3dbLL
128
129/*
130 * HugeTLB support
131 */
132#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
133#define _PAGE_SZHUGE (_PAGE_SIZE0)
134#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
135#define _PAGE_SZHUGE (_PAGE_SIZE1)
136#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
137#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
138#endif
139
140/*
141 * Default flags for a Kernel page.
 142 * This is fundamentally also SHARED because the main use of this define
143 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
144 * contextless.
145 *
146 * _PAGE_EXECUTE is required for modules
147 *
148 */
149#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
150 _PAGE_EXECUTE | \
151 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
152 _PAGE_SHARED)
153
154/* Default flags for a User page */
155#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
156
157#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
158
159/*
160 * We have full permissions (Read/Write/Execute/Shared).
161 */
162#define _PAGE_COMMON (_PAGE_PRESENT | _PAGE_USER | \
163 _PAGE_CACHABLE | _PAGE_ACCESSED)
164
165#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
166#define PAGE_SHARED __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
167 _PAGE_SHARED)
168#define PAGE_EXECREAD __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)
169
170/*
171 * We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
172 * protection mode for the stack.
173 */
174#define PAGE_COPY PAGE_EXECREAD
175
176#define PAGE_READONLY __pgprot(_PAGE_COMMON | _PAGE_READ)
177#define PAGE_WRITEONLY __pgprot(_PAGE_COMMON | _PAGE_WRITE)
178#define PAGE_RWX __pgprot(_PAGE_COMMON | _PAGE_READ | \
179 _PAGE_WRITE | _PAGE_EXECUTE)
180#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
181
182/* Make it a device mapping for maximum safety (e.g. for mapping device
183 registers into user-space via /dev/map). */
184#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
185#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
186
187/*
188 * Handling allocation failures during page table setup.
189 */
190extern void __handle_bad_pmd_kernel(pmd_t * pmd);
191#define __handle_bad_pmd(x) __handle_bad_pmd_kernel(x)
192
193/*
194 * PTE level access routines.
195 *
196 * Note1:
 197 * It's the tree walk leaf. This is the physical address to be stored.
198 *
199 * Note 2:
200 * Regarding the choice of _PTE_EMPTY:
201
202 We must choose a bit pattern that cannot be valid, whether or not the page
203 is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
204 out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
205 left for us to select. If we force bit[7]==0 when swapped out, we could use
206 the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
207 we force bit[7]==1 when swapped out, we can use all zeroes to indicate
208 empty. This is convenient, because the page tables get cleared to zero
209 when they are allocated.
210
211 */
212#define _PTE_EMPTY 0x0
213#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
214#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
215#define pte_none(x) (pte_val(x) == _PTE_EMPTY)
216
217/*
218 * Some definitions to translate between mem_map, PTEs, and page
219 * addresses:
220 */
221
222/*
223 * Given a PTE, return the index of the mem_map[] entry corresponding
 224 * to the page frame the PTE refers to. Get the absolute physical address, make
225 * a relative physical address and translate it to an index.
226 */
227#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
228 __MEMORY_START) >> PAGE_SHIFT)
229
230/*
231 * Given a PTE, return the "struct page *".
232 */
233#define pte_page(x) (mem_map + pte_pagenr(x))
234
235/*
236 * Return number of (down rounded) MB corresponding to x pages.
237 */
238#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
239
240
241/*
 242 * The following have defined behavior only if pte_present() is true.
243 */
244static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
245static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
246static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
247static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }
248
249static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
250static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
251static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
252static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
253static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
254static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
255static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
256
257
258/*
259 * Conversion functions: convert a page and protection to a page entry.
260 *
261 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
262 */
263#define mk_pte(page,pgprot) \
264({ \
265 pte_t __pte; \
266 \
267 set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
268 __MEMORY_START | pgprot_val((pgprot)))); \
269 __pte; \
270})
271
272/*
 273 * This takes an (absolute) physical page address that is used
274 * by the remapping functions
275 */
276#define mk_pte_phys(physpage, pgprot) \
277({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
278
279static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
280{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
281
282/* Encode and decode a swap entry */
283#define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
284#define __swp_offset(x) ((x).val >> 8)
285#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
286#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
287#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
288
289/* Encode and decode a nonlinear file mapping entry */
290#define PTE_FILE_MAX_BITS 29
291#define pte_to_pgoff(pte) (pte_val(pte))
292#define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE })
293
294#endif /* !__ASSEMBLY__ */
295
296#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
297#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
298
299#endif /* __ASM_SH_PGTABLE_64_H */
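
[Editorial note, not part of the patch: a small sketch of the SH-5 swap encoding defined just
above. Because bit [2] of the PTE is _PAGE_PRESENT and bit [7] must stay zero so that an all-zero
PTE can mean empty, the swap type is split across bits [1:0] and [6:3] of the entry, with the
offset starting at bit 8. The macros mirror those in pgtable_64.h (with argument parentheses
added); the type/offset values are hypothetical.]

    #include <assert.h>

    typedef struct { unsigned long val; } swp_entry_t;     /* stand-in for the kernel type */

    #define __swp_type(x)    (((x).val & 3) + (((x).val >> 1) & 0x3c))
    #define __swp_offset(x)  ((x).val >> 8)
    #define __swp_entry(type, offset) \
            ((swp_entry_t){ ((offset) << 8) + (((type) & 0x3c) << 1) + ((type) & 3) })

    int main(void)
    {
            swp_entry_t e = __swp_entry(0x2b, 0x77);        /* hypothetical type/offset     */

            assert(__swp_type(e) == 0x2b);                  /* split type round-trips       */
            assert(__swp_offset(e) == 0x77);                /* offset round-trips           */
            assert((e.val & 0x04) == 0);                    /* bit 2 (_PAGE_PRESENT) clear  */
            assert((e.val & 0x80) == 0);                    /* bit 7 left clear             */
            return 0;
    }
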
diff --git a/include/asm-sh/posix_types.h b/include/asm-sh/posix_types.h
index 0a3d2f54ab27..4b9d11c9fc77 100644
--- a/include/asm-sh/posix_types.h
+++ b/include/asm-sh/posix_types.h
@@ -1,122 +1,7 @@
1#ifndef __ASM_SH_POSIX_TYPES_H 1#ifdef __KERNEL__
2#define __ASM_SH_POSIX_TYPES_H 2# ifdef CONFIG_SUPERH32
3 3# include "posix_types_32.h"
4/* 4# else
5 * This file is generally used by user-level software, so you need to 5# include "posix_types_64.h"
6 * be a little careful about namespace pollution etc. Also, we cannot 6# endif
7 * assume GCC is being used. 7#endif /* __KERNEL__ */
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t;
12typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef unsigned short __kernel_ipc_pid_t;
16typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t;
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32
33typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t;
35typedef unsigned short __kernel_old_dev_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42#if defined(__KERNEL__) || defined(__USE_ALL)
43 int val[2];
44#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
45 int __val[2];
46#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
47} __kernel_fsid_t;
48
49#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
50
51#undef __FD_SET
52static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
53{
54 unsigned long __tmp = __fd / __NFDBITS;
55 unsigned long __rem = __fd % __NFDBITS;
56 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
57}
58
59#undef __FD_CLR
60static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
61{
62 unsigned long __tmp = __fd / __NFDBITS;
63 unsigned long __rem = __fd % __NFDBITS;
64 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
65}
66
67
68#undef __FD_ISSET
69static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
70{
71 unsigned long __tmp = __fd / __NFDBITS;
72 unsigned long __rem = __fd % __NFDBITS;
73 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
74}
75
76/*
77 * This will unroll the loop for the normal constant case (8 ints,
78 * for a 256-bit fd_set)
79 */
80#undef __FD_ZERO
81static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
82{
83 unsigned long *__tmp = __p->fds_bits;
84 int __i;
85
86 if (__builtin_constant_p(__FDSET_LONGS)) {
87 switch (__FDSET_LONGS) {
88 case 16:
89 __tmp[ 0] = 0; __tmp[ 1] = 0;
90 __tmp[ 2] = 0; __tmp[ 3] = 0;
91 __tmp[ 4] = 0; __tmp[ 5] = 0;
92 __tmp[ 6] = 0; __tmp[ 7] = 0;
93 __tmp[ 8] = 0; __tmp[ 9] = 0;
94 __tmp[10] = 0; __tmp[11] = 0;
95 __tmp[12] = 0; __tmp[13] = 0;
96 __tmp[14] = 0; __tmp[15] = 0;
97 return;
98
99 case 8:
100 __tmp[ 0] = 0; __tmp[ 1] = 0;
101 __tmp[ 2] = 0; __tmp[ 3] = 0;
102 __tmp[ 4] = 0; __tmp[ 5] = 0;
103 __tmp[ 6] = 0; __tmp[ 7] = 0;
104 return;
105
106 case 4:
107 __tmp[ 0] = 0; __tmp[ 1] = 0;
108 __tmp[ 2] = 0; __tmp[ 3] = 0;
109 return;
110 }
111 }
112 __i = __FDSET_LONGS;
113 while (__i) {
114 __i--;
115 *__tmp = 0;
116 __tmp++;
117 }
118}
119
120#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
121
122#endif /* __ASM_SH_POSIX_TYPES_H */
diff --git a/include/asm-sh/posix_types_32.h b/include/asm-sh/posix_types_32.h
new file mode 100644
index 000000000000..0a3d2f54ab27
--- /dev/null
+++ b/include/asm-sh/posix_types_32.h
@@ -0,0 +1,122 @@
1#ifndef __ASM_SH_POSIX_TYPES_H
2#define __ASM_SH_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t;
12typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef unsigned short __kernel_ipc_pid_t;
16typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t;
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32
33typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t;
35typedef unsigned short __kernel_old_dev_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42#if defined(__KERNEL__) || defined(__USE_ALL)
43 int val[2];
44#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
45 int __val[2];
46#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
47} __kernel_fsid_t;
48
49#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
50
51#undef __FD_SET
52static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
53{
54 unsigned long __tmp = __fd / __NFDBITS;
55 unsigned long __rem = __fd % __NFDBITS;
56 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
57}
58
59#undef __FD_CLR
60static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
61{
62 unsigned long __tmp = __fd / __NFDBITS;
63 unsigned long __rem = __fd % __NFDBITS;
64 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
65}
66
67
68#undef __FD_ISSET
69static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
70{
71 unsigned long __tmp = __fd / __NFDBITS;
72 unsigned long __rem = __fd % __NFDBITS;
73 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
74}
75
76/*
77 * This will unroll the loop for the normal constant case (8 ints,
78 * for a 256-bit fd_set)
79 */
80#undef __FD_ZERO
81static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
82{
83 unsigned long *__tmp = __p->fds_bits;
84 int __i;
85
86 if (__builtin_constant_p(__FDSET_LONGS)) {
87 switch (__FDSET_LONGS) {
88 case 16:
89 __tmp[ 0] = 0; __tmp[ 1] = 0;
90 __tmp[ 2] = 0; __tmp[ 3] = 0;
91 __tmp[ 4] = 0; __tmp[ 5] = 0;
92 __tmp[ 6] = 0; __tmp[ 7] = 0;
93 __tmp[ 8] = 0; __tmp[ 9] = 0;
94 __tmp[10] = 0; __tmp[11] = 0;
95 __tmp[12] = 0; __tmp[13] = 0;
96 __tmp[14] = 0; __tmp[15] = 0;
97 return;
98
99 case 8:
100 __tmp[ 0] = 0; __tmp[ 1] = 0;
101 __tmp[ 2] = 0; __tmp[ 3] = 0;
102 __tmp[ 4] = 0; __tmp[ 5] = 0;
103 __tmp[ 6] = 0; __tmp[ 7] = 0;
104 return;
105
106 case 4:
107 __tmp[ 0] = 0; __tmp[ 1] = 0;
108 __tmp[ 2] = 0; __tmp[ 3] = 0;
109 return;
110 }
111 }
112 __i = __FDSET_LONGS;
113 while (__i) {
114 __i--;
115 *__tmp = 0;
116 __tmp++;
117 }
118}
119
120#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
121
122#endif /* __ASM_SH_POSIX_TYPES_H */
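
[Editorial note, not part of the patch: a brief sketch of the word/bit arithmetic used by the
__FD_* helpers above. __fd / __NFDBITS selects the long word inside fds_bits[] and
__fd % __NFDBITS selects the bit within that word (fd 69 lands in word 2, bit 5 on a 32-bit
long). The descriptor value and array size below are illustrative only.]

    #include <assert.h>

    #define __NFDBITS (8 * sizeof(unsigned long))

    int main(void)
    {
            unsigned long fds_bits[8] = { 0 };      /* fd_set storage, as in __kernel_fd_set */
            unsigned long fd   = 69;                /* hypothetical descriptor               */
            unsigned long word = fd / __NFDBITS;    /* which long holds the bit              */
            unsigned long bit  = fd % __NFDBITS;    /* which bit inside that long            */

            fds_bits[word] |= 1UL << bit;           /* what __FD_SET() does                  */
            assert(fds_bits[word] & (1UL << bit));  /* what __FD_ISSET() tests               */
            fds_bits[word] &= ~(1UL << bit);        /* what __FD_CLR() does                  */
            assert(fds_bits[word] == 0);
            return 0;
    }
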
diff --git a/include/asm-sh/posix_types_64.h b/include/asm-sh/posix_types_64.h
new file mode 100644
index 000000000000..0620317a6f0f
--- /dev/null
+++ b/include/asm-sh/posix_types_64.h
@@ -0,0 +1,131 @@
1#ifndef __ASM_SH64_POSIX_TYPES_H
2#define __ASM_SH64_POSIX_TYPES_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/posix_types.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 * This file is generally used by user-level software, so you need to
15 * be a little careful about namespace pollution etc. Also, we cannot
16 * assume GCC is being used.
17 */
18
19typedef unsigned long __kernel_ino_t;
20typedef unsigned short __kernel_mode_t;
21typedef unsigned short __kernel_nlink_t;
22typedef long __kernel_off_t;
23typedef int __kernel_pid_t;
24typedef unsigned short __kernel_ipc_pid_t;
25typedef unsigned short __kernel_uid_t;
26typedef unsigned short __kernel_gid_t;
27typedef long unsigned int __kernel_size_t;
28typedef int __kernel_ssize_t;
29typedef int __kernel_ptrdiff_t;
30typedef long __kernel_time_t;
31typedef long __kernel_suseconds_t;
32typedef long __kernel_clock_t;
33typedef int __kernel_timer_t;
34typedef int __kernel_clockid_t;
35typedef int __kernel_daddr_t;
36typedef char * __kernel_caddr_t;
37typedef unsigned short __kernel_uid16_t;
38typedef unsigned short __kernel_gid16_t;
39typedef unsigned int __kernel_uid32_t;
40typedef unsigned int __kernel_gid32_t;
41
42typedef unsigned short __kernel_old_uid_t;
43typedef unsigned short __kernel_old_gid_t;
44typedef unsigned short __kernel_old_dev_t;
45
46#ifdef __GNUC__
47typedef long long __kernel_loff_t;
48#endif
49
50typedef struct {
51#if defined(__KERNEL__) || defined(__USE_ALL)
52 int val[2];
53#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
54 int __val[2];
55#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
56} __kernel_fsid_t;
57
58#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
59
60#undef __FD_SET
61static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
62{
63 unsigned long __tmp = __fd / __NFDBITS;
64 unsigned long __rem = __fd % __NFDBITS;
65 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
66}
67
68#undef __FD_CLR
69static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
70{
71 unsigned long __tmp = __fd / __NFDBITS;
72 unsigned long __rem = __fd % __NFDBITS;
73 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
74}
75
76
77#undef __FD_ISSET
78static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
79{
80 unsigned long __tmp = __fd / __NFDBITS;
81 unsigned long __rem = __fd % __NFDBITS;
82 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
83}
84
85/*
86 * This will unroll the loop for the normal constant case (8 ints,
87 * for a 256-bit fd_set)
88 */
89#undef __FD_ZERO
90static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
91{
92 unsigned long *__tmp = __p->fds_bits;
93 int __i;
94
95 if (__builtin_constant_p(__FDSET_LONGS)) {
96 switch (__FDSET_LONGS) {
97 case 16:
98 __tmp[ 0] = 0; __tmp[ 1] = 0;
99 __tmp[ 2] = 0; __tmp[ 3] = 0;
100 __tmp[ 4] = 0; __tmp[ 5] = 0;
101 __tmp[ 6] = 0; __tmp[ 7] = 0;
102 __tmp[ 8] = 0; __tmp[ 9] = 0;
103 __tmp[10] = 0; __tmp[11] = 0;
104 __tmp[12] = 0; __tmp[13] = 0;
105 __tmp[14] = 0; __tmp[15] = 0;
106 return;
107
108 case 8:
109 __tmp[ 0] = 0; __tmp[ 1] = 0;
110 __tmp[ 2] = 0; __tmp[ 3] = 0;
111 __tmp[ 4] = 0; __tmp[ 5] = 0;
112 __tmp[ 6] = 0; __tmp[ 7] = 0;
113 return;
114
115 case 4:
116 __tmp[ 0] = 0; __tmp[ 1] = 0;
117 __tmp[ 2] = 0; __tmp[ 3] = 0;
118 return;
119 }
120 }
121 __i = __FDSET_LONGS;
122 while (__i) {
123 __i--;
124 *__tmp = 0;
125 __tmp++;
126 }
127}
128
129#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
130
131#endif /* __ASM_SH64_POSIX_TYPES_H */
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index fda68480f377..c9b14161f73d 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -1,32 +1,10 @@
1/*
2 * include/asm-sh/processor.h
3 *
4 * Copyright (C) 1999, 2000 Niibe Yutaka
5 * Copyright (C) 2002, 2003 Paul Mundt
6 */
7
8#ifndef __ASM_SH_PROCESSOR_H 1#ifndef __ASM_SH_PROCESSOR_H
9#define __ASM_SH_PROCESSOR_H 2#define __ASM_SH_PROCESSOR_H
10#ifdef __KERNEL__
11 3
12#include <linux/compiler.h>
13#include <asm/page.h>
14#include <asm/types.h>
15#include <asm/cache.h>
16#include <asm/ptrace.h>
17#include <asm/cpu-features.h> 4#include <asm/cpu-features.h>
5#include <asm/fpu.h>
18 6
19/* 7#ifndef __ASSEMBLY__
20 * Default implementation of macro that returns current
21 * instruction pointer ("program counter").
22 */
23#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n1:":"=z" (pc)); pc; })
24
25/* Core Processor Version Register */
26#define CCN_PVR 0xff000030
27#define CCN_CVR 0xff000040
28#define CCN_PRR 0xff000044
29
30/* 8/*
31 * CPU type and hardware bug flags. Kept separately for each CPU. 9 * CPU type and hardware bug flags. Kept separately for each CPU.
32 * 10 *
@@ -39,247 +17,49 @@ enum cpu_type {
39 CPU_SH7619, 17 CPU_SH7619,
40 18
41 /* SH-2A types */ 19 /* SH-2A types */
42 CPU_SH7206, 20 CPU_SH7203, CPU_SH7206, CPU_SH7263,
43 21
44 /* SH-3 types */ 22 /* SH-3 types */
45 CPU_SH7705, CPU_SH7706, CPU_SH7707, 23 CPU_SH7705, CPU_SH7706, CPU_SH7707,
46 CPU_SH7708, CPU_SH7708S, CPU_SH7708R, 24 CPU_SH7708, CPU_SH7708S, CPU_SH7708R,
47 CPU_SH7709, CPU_SH7709A, CPU_SH7710, CPU_SH7712, 25 CPU_SH7709, CPU_SH7709A, CPU_SH7710, CPU_SH7712,
48 CPU_SH7720, CPU_SH7729, 26 CPU_SH7720, CPU_SH7721, CPU_SH7729,
49 27
50 /* SH-4 types */ 28 /* SH-4 types */
51 CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R, 29 CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R,
52 CPU_SH7760, CPU_SH4_202, CPU_SH4_501, 30 CPU_SH7760, CPU_SH4_202, CPU_SH4_501,
53 31
54 /* SH-4A types */ 32 /* SH-4A types */
55 CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SHX3, 33 CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SHX3,
56 34
57 /* SH4AL-DSP types */ 35 /* SH4AL-DSP types */
58 CPU_SH7343, CPU_SH7722, 36 CPU_SH7343, CPU_SH7722,
59 37
38 /* SH-5 types */
39 CPU_SH5_101, CPU_SH5_103,
40
60 /* Unknown subtype */ 41 /* Unknown subtype */
61 CPU_SH_NONE 42 CPU_SH_NONE
62}; 43};
63 44
64struct sh_cpuinfo { 45/* Forward decl */
65 unsigned int type; 46struct sh_cpuinfo;
66 unsigned long loops_per_jiffy;
67 unsigned long asid_cache;
68
69 struct cache_info icache; /* Primary I-cache */
70 struct cache_info dcache; /* Primary D-cache */
71 struct cache_info scache; /* Secondary cache */
72
73 unsigned long flags;
74} __attribute__ ((aligned(L1_CACHE_BYTES)));
75
76extern struct sh_cpuinfo cpu_data[];
77#define boot_cpu_data cpu_data[0]
78#define current_cpu_data cpu_data[smp_processor_id()]
79#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
80
81/*
82 * User space process size: 2GB.
83 *
84 * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff
85 */
86#define TASK_SIZE 0x7c000000UL
87
88/* This decides where the kernel will search for a free chunk of vm
89 * space during mmap's.
90 */
91#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
92
93/*
94 * Bit of SR register
95 *
96 * FD-bit:
97 * When it's set, it means the processor doesn't have right to use FPU,
98 * and it results exception when the floating operation is executed.
99 *
100 * IMASK-bit:
101 * Interrupt level mask
102 */
103#define SR_FD 0x00008000
104#define SR_DSP 0x00001000
105#define SR_IMASK 0x000000f0
106
107/*
108 * FPU structure and data
109 */
110
111struct sh_fpu_hard_struct {
112 unsigned long fp_regs[16];
113 unsigned long xfp_regs[16];
114 unsigned long fpscr;
115 unsigned long fpul;
116
117 long status; /* software status information */
118};
119
120/* Dummy fpu emulator */
121struct sh_fpu_soft_struct {
122 unsigned long fp_regs[16];
123 unsigned long xfp_regs[16];
124 unsigned long fpscr;
125 unsigned long fpul;
126
127 unsigned char lookahead;
128 unsigned long entry_pc;
129};
130
131union sh_fpu_union {
132 struct sh_fpu_hard_struct hard;
133 struct sh_fpu_soft_struct soft;
134};
135
136struct thread_struct {
137 /* Saved registers when thread is descheduled */
138 unsigned long sp;
139 unsigned long pc;
140
141 /* Hardware debugging registers */
142 unsigned long ubc_pc;
143
144 /* floating point info */
145 union sh_fpu_union fpu;
146};
147
148typedef struct {
149 unsigned long seg;
150} mm_segment_t;
151
152/* Count of active tasks with UBC settings */
153extern int ubc_usercnt;
154 47
155#define INIT_THREAD { \ 48/* arch/sh/kernel/setup.c */
156 .sp = sizeof(init_stack) + (long) &init_stack, \ 49const char *get_cpu_subtype(struct sh_cpuinfo *c);
157}
158
159/*
160 * Do necessary setup to start up a newly executed thread.
161 */
162#define start_thread(regs, new_pc, new_sp) \
163 set_fs(USER_DS); \
164 regs->pr = 0; \
165 regs->sr = SR_FD; /* User mode. */ \
166 regs->pc = new_pc; \
167 regs->regs[15] = new_sp
168
169/* Forward declaration, a strange C thing */
170struct task_struct;
171struct mm_struct;
172
173/* Free all resources held by a thread. */
174extern void release_thread(struct task_struct *);
175
176/* Prepare to copy thread state - unlazy all lazy status */
177#define prepare_to_copy(tsk) do { } while (0)
178
179/*
180 * create a kernel thread without removing it from tasklists
181 */
182extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
183
184/* Copy and release all segment info associated with a VM */
185#define copy_segments(p, mm) do { } while(0)
186#define release_segments(mm) do { } while(0)
187
188/*
189 * FPU lazy state save handling.
190 */
191
192static __inline__ void disable_fpu(void)
193{
194 unsigned long __dummy;
195
196 /* Set FD flag in SR */
197 __asm__ __volatile__("stc sr, %0\n\t"
198 "or %1, %0\n\t"
199 "ldc %0, sr"
200 : "=&r" (__dummy)
201 : "r" (SR_FD));
202}
203
204static __inline__ void enable_fpu(void)
205{
206 unsigned long __dummy;
207
208 /* Clear out FD flag in SR */
209 __asm__ __volatile__("stc sr, %0\n\t"
210 "and %1, %0\n\t"
211 "ldc %0, sr"
212 : "=&r" (__dummy)
213 : "r" (~SR_FD));
214}
215
216static __inline__ void release_fpu(struct pt_regs *regs)
217{
218 regs->sr |= SR_FD;
219}
220
221static __inline__ void grab_fpu(struct pt_regs *regs)
222{
223 regs->sr &= ~SR_FD;
224}
225
226extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs);
227
228#define unlazy_fpu(tsk, regs) do { \
229 if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \
230 save_fpu(tsk, regs); \
231 } \
232} while (0)
233
234#define clear_fpu(tsk, regs) do { \
235 if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \
236 clear_tsk_thread_flag(tsk, TIF_USEDFPU); \
237 release_fpu(regs); \
238 } \
239} while (0)
240
241/* Double presision, NANS as NANS, rounding to nearest, no exceptions */
242#define FPSCR_INIT 0x00080000
243
244#define FPSCR_CAUSE_MASK 0x0001f000 /* Cause bits */
245#define FPSCR_FLAG_MASK 0x0000007c /* Flag bits */
246
247/*
248 * Return saved PC of a blocked thread.
249 */
250#define thread_saved_pc(tsk) (tsk->thread.pc)
251
252void show_trace(struct task_struct *tsk, unsigned long *sp,
253 struct pt_regs *regs);
254extern unsigned long get_wchan(struct task_struct *p);
255
256#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
257#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15])
258
259#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
260#define cpu_relax() barrier()
261
262#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \
263 defined(CONFIG_CPU_SH4)
264#define PREFETCH_STRIDE L1_CACHE_BYTES
265#define ARCH_HAS_PREFETCH
266#define ARCH_HAS_PREFETCHW
267static inline void prefetch(void *x)
268{
269 __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory");
270}
271
272#define prefetchw(x) prefetch(x)
273#endif
274 50
275#ifdef CONFIG_VSYSCALL 51#ifdef CONFIG_VSYSCALL
276extern int vsyscall_init(void); 52int vsyscall_init(void);
277#else 53#else
278#define vsyscall_init() do { } while (0) 54#define vsyscall_init() do { } while (0)
279#endif 55#endif
280 56
281/* arch/sh/kernel/setup.c */ 57#endif /* __ASSEMBLY__ */
282const char *get_cpu_subtype(struct sh_cpuinfo *c); 58
59#ifdef CONFIG_SUPERH32
60# include "processor_32.h"
61#else
62# include "processor_64.h"
63#endif
283 64
284#endif /* __KERNEL__ */
285#endif /* __ASM_SH_PROCESSOR_H */ 65#endif /* __ASM_SH_PROCESSOR_H */
diff --git a/include/asm-sh/processor_32.h b/include/asm-sh/processor_32.h
new file mode 100644
index 000000000000..a7edaa1a870c
--- /dev/null
+++ b/include/asm-sh/processor_32.h
@@ -0,0 +1,215 @@
1/*
2 * include/asm-sh/processor.h
3 *
4 * Copyright (C) 1999, 2000 Niibe Yutaka
5 * Copyright (C) 2002, 2003 Paul Mundt
6 */
7
8#ifndef __ASM_SH_PROCESSOR_32_H
9#define __ASM_SH_PROCESSOR_32_H
10#ifdef __KERNEL__
11
12#include <linux/compiler.h>
13#include <asm/page.h>
14#include <asm/types.h>
15#include <asm/cache.h>
16#include <asm/ptrace.h>
17
18/*
19 * Default implementation of macro that returns current
20 * instruction pointer ("program counter").
21 */
22#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n1:":"=z" (pc)); pc; })
23
24/* Core Processor Version Register */
25#define CCN_PVR 0xff000030
26#define CCN_CVR 0xff000040
27#define CCN_PRR 0xff000044
28
29struct sh_cpuinfo {
30 unsigned int type;
31 unsigned long loops_per_jiffy;
32 unsigned long asid_cache;
33
34 struct cache_info icache; /* Primary I-cache */
35 struct cache_info dcache; /* Primary D-cache */
36 struct cache_info scache; /* Secondary cache */
37
38 unsigned long flags;
39} __attribute__ ((aligned(L1_CACHE_BYTES)));
40
41extern struct sh_cpuinfo cpu_data[];
42#define boot_cpu_data cpu_data[0]
43#define current_cpu_data cpu_data[smp_processor_id()]
44#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
45
46/*
47 * User space process size: 2GB.
48 *
49 * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff
50 */
51#define TASK_SIZE 0x7c000000UL
52
53/* This decides where the kernel will search for a free chunk of vm
54 * space during mmap's.
55 */
56#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
57
58/*
59 * Bit of SR register
60 *
61 * FD-bit:
 62 * When it is set, the processor does not have the right to use the FPU,
 63 * and an exception results when a floating-point operation is executed.
64 *
65 * IMASK-bit:
66 * Interrupt level mask
67 */
68#define SR_DSP 0x00001000
69#define SR_IMASK 0x000000f0
70
71/*
72 * FPU structure and data
73 */
74
75struct sh_fpu_hard_struct {
76 unsigned long fp_regs[16];
77 unsigned long xfp_regs[16];
78 unsigned long fpscr;
79 unsigned long fpul;
80
81 long status; /* software status information */
82};
83
84/* Dummy fpu emulator */
85struct sh_fpu_soft_struct {
86 unsigned long fp_regs[16];
87 unsigned long xfp_regs[16];
88 unsigned long fpscr;
89 unsigned long fpul;
90
91 unsigned char lookahead;
92 unsigned long entry_pc;
93};
94
95union sh_fpu_union {
96 struct sh_fpu_hard_struct hard;
97 struct sh_fpu_soft_struct soft;
98};
99
100struct thread_struct {
101 /* Saved registers when thread is descheduled */
102 unsigned long sp;
103 unsigned long pc;
104
105 /* Hardware debugging registers */
106 unsigned long ubc_pc;
107
108 /* floating point info */
109 union sh_fpu_union fpu;
110};
111
112typedef struct {
113 unsigned long seg;
114} mm_segment_t;
115
116/* Count of active tasks with UBC settings */
117extern int ubc_usercnt;
118
119#define INIT_THREAD { \
120 .sp = sizeof(init_stack) + (long) &init_stack, \
121}
122
123/*
124 * Do necessary setup to start up a newly executed thread.
125 */
126#define start_thread(regs, new_pc, new_sp) \
127 set_fs(USER_DS); \
128 regs->pr = 0; \
129 regs->sr = SR_FD; /* User mode. */ \
130 regs->pc = new_pc; \
131 regs->regs[15] = new_sp
132
133/* Forward declaration, a strange C thing */
134struct task_struct;
135struct mm_struct;
136
137/* Free all resources held by a thread. */
138extern void release_thread(struct task_struct *);
139
140/* Prepare to copy thread state - unlazy all lazy status */
141#define prepare_to_copy(tsk) do { } while (0)
142
143/*
144 * create a kernel thread without removing it from tasklists
145 */
146extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
147
148/* Copy and release all segment info associated with a VM */
149#define copy_segments(p, mm) do { } while(0)
150#define release_segments(mm) do { } while(0)
151
152/*
153 * FPU lazy state save handling.
154 */
155
156static __inline__ void disable_fpu(void)
157{
158 unsigned long __dummy;
159
160 /* Set FD flag in SR */
161 __asm__ __volatile__("stc sr, %0\n\t"
162 "or %1, %0\n\t"
163 "ldc %0, sr"
164 : "=&r" (__dummy)
165 : "r" (SR_FD));
166}
167
168static __inline__ void enable_fpu(void)
169{
170 unsigned long __dummy;
171
172 /* Clear out FD flag in SR */
173 __asm__ __volatile__("stc sr, %0\n\t"
174 "and %1, %0\n\t"
175 "ldc %0, sr"
176 : "=&r" (__dummy)
177 : "r" (~SR_FD));
178}
179
 180/* Double precision, NaNs as NaNs, rounding to nearest, no exceptions */
181#define FPSCR_INIT 0x00080000
182
183#define FPSCR_CAUSE_MASK 0x0001f000 /* Cause bits */
184#define FPSCR_FLAG_MASK 0x0000007c /* Flag bits */
185
186/*
187 * Return saved PC of a blocked thread.
188 */
189#define thread_saved_pc(tsk) (tsk->thread.pc)
190
191void show_trace(struct task_struct *tsk, unsigned long *sp,
192 struct pt_regs *regs);
193extern unsigned long get_wchan(struct task_struct *p);
194
195#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
196#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15])
197
198#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
199#define cpu_relax() barrier()
200
201#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \
202 defined(CONFIG_CPU_SH4)
203#define PREFETCH_STRIDE L1_CACHE_BYTES
204#define ARCH_HAS_PREFETCH
205#define ARCH_HAS_PREFETCHW
206static inline void prefetch(void *x)
207{
208 __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory");
209}
210
211#define prefetchw(x) prefetch(x)
212#endif
213
214#endif /* __KERNEL__ */
215#endif /* __ASM_SH_PROCESSOR_32_H */
diff --git a/include/asm-sh/processor_64.h b/include/asm-sh/processor_64.h
new file mode 100644
index 000000000000..99c22b14a85b
--- /dev/null
+++ b/include/asm-sh/processor_64.h
@@ -0,0 +1,275 @@
1#ifndef __ASM_SH_PROCESSOR_64_H
2#define __ASM_SH_PROCESSOR_64_H
3
4/*
5 * include/asm-sh/processor_64.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 * Copyright (C) 2003 Paul Mundt
9 * Copyright (C) 2004 Richard Curnow
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 */
15#ifndef __ASSEMBLY__
16
17#include <linux/compiler.h>
18#include <asm/page.h>
19#include <asm/types.h>
20#include <asm/cache.h>
21#include <asm/ptrace.h>
22#include <asm/cpu/registers.h>
23
24/*
25 * Default implementation of macro that returns current
26 * instruction pointer ("program counter").
27 */
28#define current_text_addr() ({ \
29void *pc; \
30unsigned long long __dummy = 0; \
31__asm__("gettr tr0, %1\n\t" \
32 "pta 4, tr0\n\t" \
33 "gettr tr0, %0\n\t" \
34 "ptabs %1, tr0\n\t" \
35 :"=r" (pc), "=r" (__dummy) \
36 : "1" (__dummy)); \
37pc; })
38
39/*
40 * TLB information structure
41 *
42 * Defined for both I and D tlb, per-processor.
43 */
44struct tlb_info {
45 unsigned long long next;
46 unsigned long long first;
47 unsigned long long last;
48
49 unsigned int entries;
50 unsigned int step;
51
52 unsigned long flags;
53};
54
55struct sh_cpuinfo {
56 enum cpu_type type;
57 unsigned long loops_per_jiffy;
58 unsigned long asid_cache;
59
60 unsigned int cpu_clock, master_clock, bus_clock, module_clock;
61
62 /* Cache info */
63 struct cache_info icache;
64 struct cache_info dcache;
65 struct cache_info scache;
66
67 /* TLB info */
68 struct tlb_info itlb;
69 struct tlb_info dtlb;
70
71 unsigned long flags;
72};
73
74extern struct sh_cpuinfo cpu_data[];
75#define boot_cpu_data cpu_data[0]
76#define current_cpu_data cpu_data[smp_processor_id()]
77#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
78
79#endif
80
81/*
82 * User space process size: 2GB - 4k.
83 */
84#define TASK_SIZE 0x7ffff000UL
85
86/* This decides where the kernel will search for a free chunk of vm
87 * space during mmap's.
88 */
89#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
90
91/*
92 * Bit of SR register
93 *
94 * FD-bit:
 95 * When it is set, the processor does not have the right to use the FPU,
 96 * and an exception results when a floating-point operation is executed.
97 *
98 * IMASK-bit:
99 * Interrupt level mask
100 *
101 * STEP-bit:
102 * Single step bit
103 *
104 */
105#if defined(CONFIG_SH64_SR_WATCH)
106#define SR_MMU 0x84000000
107#else
108#define SR_MMU 0x80000000
109#endif
110
111#define SR_IMASK 0x000000f0
112#define SR_SSTEP 0x08000000
113
114#ifndef __ASSEMBLY__
115
116/*
117 * FPU structure and data : require 8-byte alignment as we need to access it
118 with fld.p, fst.p
119 */
120
121struct sh_fpu_hard_struct {
122 unsigned long fp_regs[64];
123 unsigned int fpscr;
124 /* long status; * software status information */
125};
126
127#if 0
128/* Dummy fpu emulator */
129struct sh_fpu_soft_struct {
130 unsigned long long fp_regs[32];
131 unsigned int fpscr;
132 unsigned char lookahead;
133 unsigned long entry_pc;
134};
135#endif
136
137union sh_fpu_union {
138 struct sh_fpu_hard_struct hard;
139 /* 'hard' itself only produces 32 bit alignment, yet we need
140 to access it using 64 bit load/store as well. */
141 unsigned long long alignment_dummy;
142};
143
144struct thread_struct {
145 unsigned long sp;
146 unsigned long pc;
147 /* This stores the address of the pt_regs built during a context
148 switch, or of the register save area built for a kernel mode
149 exception. It is used for backtracing the stack of a sleeping task
150 or one that traps in kernel mode. */
151 struct pt_regs *kregs;
152 /* This stores the address of the pt_regs constructed on entry from
153 user mode. It is a fixed value over the lifetime of a process, or
154 NULL for a kernel thread. */
155 struct pt_regs *uregs;
156
157 unsigned long trap_no, error_code;
158 unsigned long address;
159 /* Hardware debugging registers may come here */
160
161 /* floating point info */
162 union sh_fpu_union fpu;
163};
164
165typedef struct {
166 unsigned long seg;
167} mm_segment_t;
168
169#define INIT_MMAP \
170{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
171
172extern struct pt_regs fake_swapper_regs;
173
174#define INIT_THREAD { \
175 .sp = sizeof(init_stack) + \
176 (long) &init_stack, \
177 .pc = 0, \
178 .kregs = &fake_swapper_regs, \
179 .uregs = NULL, \
180 .trap_no = 0, \
181 .error_code = 0, \
182 .address = 0, \
183 .fpu = { { { 0, } }, } \
184}
185
186/*
187 * Do necessary setup to start up a newly executed thread.
188 */
189#define SR_USER (SR_MMU | SR_FD)
190
191#define start_thread(regs, new_pc, new_sp) \
192 set_fs(USER_DS); \
193 regs->sr = SR_USER; /* User mode. */ \
194 regs->pc = new_pc - 4; /* Compensate syscall exit */ \
195 regs->pc |= 1; /* Set SHmedia ! */ \
196 regs->regs[18] = 0; \
197 regs->regs[15] = new_sp
198
199/* Forward declaration, a strange C thing */
200struct task_struct;
201struct mm_struct;
202
203/* Free all resources held by a thread. */
204extern void release_thread(struct task_struct *);
205/*
206 * create a kernel thread without removing it from tasklists
207 */
208extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
209
210
211/* Copy and release all segment info associated with a VM */
212#define copy_segments(p, mm) do { } while (0)
213#define release_segments(mm) do { } while (0)
214#define forget_segments() do { } while (0)
215#define prepare_to_copy(tsk) do { } while (0)
216/*
217 * FPU lazy state save handling.
218 */
219
220static inline void disable_fpu(void)
221{
222 unsigned long long __dummy;
223
224 /* Set FD flag in SR */
225 __asm__ __volatile__("getcon " __SR ", %0\n\t"
226 "or %0, %1, %0\n\t"
227 "putcon %0, " __SR "\n\t"
228 : "=&r" (__dummy)
229 : "r" (SR_FD));
230}
231
232static inline void enable_fpu(void)
233{
234 unsigned long long __dummy;
235
236 /* Clear out FD flag in SR */
237 __asm__ __volatile__("getcon " __SR ", %0\n\t"
238 "and %0, %1, %0\n\t"
239 "putcon %0, " __SR "\n\t"
240 : "=&r" (__dummy)
241 : "r" (~SR_FD));
242}
243
244/* Round to nearest, no exceptions on inexact, overflow, underflow,
245 zero-divide, invalid. Configure option for whether to flush denorms to
246 zero, or except if a denorm is encountered. */
247#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
248#define FPSCR_INIT 0x00040000
249#else
250#define FPSCR_INIT 0x00000000
251#endif
252
253#ifdef CONFIG_SH_FPU
254/* Initialise the FP state of a task */
255void fpinit(struct sh_fpu_hard_struct *fpregs);
256#else
257#define fpinit(fpregs) do { } while (0)
258#endif
259
260extern struct task_struct *last_task_used_math;
261
262/*
263 * Return saved PC of a blocked thread.
264 */
265#define thread_saved_pc(tsk) (tsk->thread.pc)
266
267extern unsigned long get_wchan(struct task_struct *p);
268
269#define KSTK_EIP(tsk) ((tsk)->thread.pc)
270#define KSTK_ESP(tsk) ((tsk)->thread.sp)
271
272#define cpu_relax() barrier()
273
274#endif /* __ASSEMBLY__ */
275#endif /* __ASM_SH_PROCESSOR_64_H */
diff --git a/include/asm-sh/ptrace.h b/include/asm-sh/ptrace.h
index b9789c8b4d15..8d6c92b3e770 100644
--- a/include/asm-sh/ptrace.h
+++ b/include/asm-sh/ptrace.h
@@ -5,7 +5,16 @@
5 * Copyright (C) 1999, 2000 Niibe Yutaka 5 * Copyright (C) 1999, 2000 Niibe Yutaka
6 * 6 *
7 */ 7 */
8 8#if defined(__SH5__) || defined(CONFIG_SUPERH64)
9struct pt_regs {
10 unsigned long long pc;
11 unsigned long long sr;
12 unsigned long long syscall_nr;
13 unsigned long long regs[63];
14 unsigned long long tregs[8];
15 unsigned long long pad[2];
16};
17#else
9/* 18/*
10 * GCC defines register number like this: 19 * GCC defines register number like this:
11 * ----------------------------- 20 * -----------------------------
@@ -28,7 +37,7 @@
28 37
29#define REG_PR 17 38#define REG_PR 17
30#define REG_SR 18 39#define REG_SR 18
31#define REG_GBR 19 40#define REG_GBR 19
32#define REG_MACH 20 41#define REG_MACH 20
33#define REG_MACL 21 42#define REG_MACL 21
34 43
@@ -80,10 +89,14 @@ struct pt_dspregs {
80 89
81#define PTRACE_GETDSPREGS 55 90#define PTRACE_GETDSPREGS 55
82#define PTRACE_SETDSPREGS 56 91#define PTRACE_SETDSPREGS 56
92#endif
83 93
84#ifdef __KERNEL__ 94#ifdef __KERNEL__
85#define user_mode(regs) (((regs)->sr & 0x40000000)==0) 95#include <asm/addrspace.h>
86#define instruction_pointer(regs) ((regs)->pc) 96
97#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
98#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
99
87extern void show_regs(struct pt_regs *); 100extern void show_regs(struct pt_regs *);
88 101
89#ifdef CONFIG_SH_DSP 102#ifdef CONFIG_SH_DSP
@@ -100,10 +113,13 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
100{ 113{
101 unsigned long pc = instruction_pointer(regs); 114 unsigned long pc = instruction_pointer(regs);
102 115
103 if (pc >= 0xa0000000UL && pc < 0xc0000000UL) 116#ifdef P2SEG
117 if (pc >= P2SEG && pc < P3SEG)
104 pc -= 0x20000000; 118 pc -= 0x20000000;
119#endif
120
105 return pc; 121 return pc;
106} 122}
107#endif 123#endif /* __KERNEL__ */
108 124
109#endif /* __ASM_SH_PTRACE_H */ 125#endif /* __ASM_SH_PTRACE_H */
diff --git a/include/asm-sh/r7780rp.h b/include/asm-sh/r7780rp.h
index de37f933aa42..bdecea0840a0 100644
--- a/include/asm-sh/r7780rp.h
+++ b/include/asm-sh/r7780rp.h
@@ -121,21 +121,6 @@
121 121
122#define IRLCNTR1 (PA_BCR + 0) /* Interrupt Control Register1 */ 122#define IRLCNTR1 (PA_BCR + 0) /* Interrupt Control Register1 */
123 123
124#define IRQ_PCISLOT1 0 /* PCI Slot #1 IRQ */
125#define IRQ_PCISLOT2 1 /* PCI Slot #2 IRQ */
126#define IRQ_PCISLOT3 2 /* PCI Slot #3 IRQ */
127#define IRQ_PCISLOT4 3 /* PCI Slot #4 IRQ */
128#define IRQ_CFINST 5 /* CF Card Insert IRQ */
129#define IRQ_M66596 6 /* M66596 IRQ */
130#define IRQ_SDCARD 7 /* SD Card IRQ */
131#define IRQ_TUCHPANEL 8 /* Touch Panel IRQ */
132#define IRQ_SCI 9 /* SCI IRQ */
133#define IRQ_2SERIAL 10 /* Serial IRQ */
134#define IRQ_EXTENTION 11 /* EXTn IRQ */
135#define IRQ_ONETH 12 /* On board Ethernet IRQ */
136#define IRQ_PSW 13 /* Push Switch IRQ */
137#define IRQ_ZIGBEE 14 /* Ziggbee IO IRQ */
138
139#define IVDR_CK_ON 8 /* iVDR Clock ON */ 124#define IVDR_CK_ON 8 /* iVDR Clock ON */
140 125
141#elif defined(CONFIG_SH_R7785RP) 126#elif defined(CONFIG_SH_R7785RP)
@@ -192,13 +177,19 @@
192 177
193#define IRQ_AX88796 (HL_FPGA_IRQ_BASE + 0) 178#define IRQ_AX88796 (HL_FPGA_IRQ_BASE + 0)
194#define IRQ_CF (HL_FPGA_IRQ_BASE + 1) 179#define IRQ_CF (HL_FPGA_IRQ_BASE + 1)
195#ifndef IRQ_PSW
196#define IRQ_PSW (HL_FPGA_IRQ_BASE + 2) 180#define IRQ_PSW (HL_FPGA_IRQ_BASE + 2)
197#endif 181#define IRQ_EXT0 (HL_FPGA_IRQ_BASE + 3)
198#define IRQ_EXT1 (HL_FPGA_IRQ_BASE + 3) 182#define IRQ_EXT1 (HL_FPGA_IRQ_BASE + 4)
199#define IRQ_EXT4 (HL_FPGA_IRQ_BASE + 4) 183#define IRQ_EXT2 (HL_FPGA_IRQ_BASE + 5)
200 184#define IRQ_EXT3 (HL_FPGA_IRQ_BASE + 6)
201void make_r7780rp_irq(unsigned int irq); 185#define IRQ_EXT4 (HL_FPGA_IRQ_BASE + 7)
186#define IRQ_EXT5 (HL_FPGA_IRQ_BASE + 8)
187#define IRQ_EXT6 (HL_FPGA_IRQ_BASE + 9)
188#define IRQ_EXT7 (HL_FPGA_IRQ_BASE + 10)
189#define IRQ_SMBUS (HL_FPGA_IRQ_BASE + 11)
190#define IRQ_TP (HL_FPGA_IRQ_BASE + 12)
191#define IRQ_RTC (HL_FPGA_IRQ_BASE + 13)
192#define IRQ_TH_ALERT (HL_FPGA_IRQ_BASE + 14)
202 193
203unsigned char *highlander_init_irq_r7780mp(void); 194unsigned char *highlander_init_irq_r7780mp(void);
204unsigned char *highlander_init_irq_r7780rp(void); 195unsigned char *highlander_init_irq_r7780rp(void);
diff --git a/include/asm-sh/rtc.h b/include/asm-sh/rtc.h
index 858da99d37e0..ec45ba8e11d9 100644
--- a/include/asm-sh/rtc.h
+++ b/include/asm-sh/rtc.h
@@ -11,4 +11,6 @@ struct sh_rtc_platform_info {
11 unsigned long capabilities; 11 unsigned long capabilities;
12}; 12};
13 13
14#include <asm/cpu/rtc.h>
15
14#endif /* _ASM_RTC_H */ 16#endif /* _ASM_RTC_H */
diff --git a/include/asm-sh/scatterlist.h b/include/asm-sh/scatterlist.h
index a7d0d1856a99..2084d0373693 100644
--- a/include/asm-sh/scatterlist.h
+++ b/include/asm-sh/scatterlist.h
@@ -13,7 +13,7 @@ struct scatterlist {
13 unsigned int length; 13 unsigned int length;
14}; 14};
15 15
16#define ISA_DMA_THRESHOLD (0x1fffffff) 16#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK
17 17
18/* These macros should be used after a pci_map_sg call has been done 18/* These macros should be used after a pci_map_sg call has been done
19 * to get bus addresses of each of the SG entries and their lengths. 19 * to get bus addresses of each of the SG entries and their lengths.
diff --git a/include/asm-sh/sdk7780.h b/include/asm-sh/sdk7780.h
new file mode 100644
index 000000000000..697dc865f21b
--- /dev/null
+++ b/include/asm-sh/sdk7780.h
@@ -0,0 +1,81 @@
1#ifndef __ASM_SH_RENESAS_SDK7780_H
2#define __ASM_SH_RENESAS_SDK7780_H
3
4/*
5 * linux/include/asm-sh/sdk7780.h
6 *
7 * Renesas Solutions SH7780 SDK Support
8 * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk>
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <asm/addrspace.h>
15
16/* Box specific addresses. */
17#define SE_AREA0_WIDTH 4 /* Area0: 32bit */
18#define PA_ROM 0xa0000000 /* EPROM */
19#define PA_ROM_SIZE 0x00400000 /* EPROM size 4M byte */
20#define PA_FROM 0xa0800000 /* Flash-ROM */
21#define PA_FROM_SIZE 0x00400000 /* Flash-ROM size 4M byte */
22#define PA_EXT1 0xa4000000
23#define PA_EXT1_SIZE 0x04000000
24#define PA_SDRAM 0xa8000000 /* DDR-SDRAM(Area2/3) 128MB */
25#define PA_SDRAM_SIZE 0x08000000
26
27#define PA_EXT4 0xb0000000
28#define PA_EXT4_SIZE 0x04000000
29#define PA_EXT_USER PA_EXT4 /* User Expansion Space */
30
31#define PA_PERIPHERAL PA_AREA5_IO
32
33/* SRAM/Reserved */
34#define PA_RESERVED (PA_PERIPHERAL + 0)
35/* FPGA base address */
36#define PA_FPGA (PA_PERIPHERAL + 0x01000000)
37/* SMC LAN91C111 */
38#define PA_LAN (PA_PERIPHERAL + 0x01800000)
39
40
41#define FPGA_SRSTR (PA_FPGA + 0x000) /* System reset */
42#define FPGA_IRQ0SR (PA_FPGA + 0x010) /* IRQ0 status */
43#define FPGA_IRQ0MR (PA_FPGA + 0x020) /* IRQ0 mask */
44#define FPGA_BDMR (PA_FPGA + 0x030) /* Board operating mode */
45#define FPGA_INTT0PRTR (PA_FPGA + 0x040) /* Interrupt test mode0 port */
46#define FPGA_INTT0SELR (PA_FPGA + 0x050) /* Int. test mode0 select */
47#define FPGA_INTT1POLR (PA_FPGA + 0x060) /* Int. test mode0 polarity */
48#define FPGA_NMIR (PA_FPGA + 0x070) /* NMI source */
49#define FPGA_NMIMR (PA_FPGA + 0x080) /* NMI mask */
50#define FPGA_IRQR (PA_FPGA + 0x090) /* IRQX source */
51#define FPGA_IRQMR (PA_FPGA + 0x0A0) /* IRQX mask */
52#define FPGA_SLEDR (PA_FPGA + 0x0B0) /* LED control */
53#define PA_LED FPGA_SLEDR
54#define FPGA_MAPSWR (PA_FPGA + 0x0C0) /* Map switch */
55#define FPGA_FPVERR (PA_FPGA + 0x0D0) /* FPGA version */
56#define FPGA_FPDATER (PA_FPGA + 0x0E0) /* FPGA date */
57#define FPGA_RSE (PA_FPGA + 0x100) /* Reset source */
58#define FPGA_EASR (PA_FPGA + 0x110) /* External area select */
59#define FPGA_SPER (PA_FPGA + 0x120) /* Serial port enable */
60#define FPGA_IMSR (PA_FPGA + 0x130) /* Interrupt mode select */
61#define FPGA_PCIMR (PA_FPGA + 0x140) /* PCI Mode */
62#define FPGA_DIPSWMR (PA_FPGA + 0x150) /* DIPSW monitor */
63#define FPGA_FPODR (PA_FPGA + 0x160) /* Output port data */
64#define FPGA_ATAESR (PA_FPGA + 0x170) /* ATA extended bus status */
65#define FPGA_IRQPOLR (PA_FPGA + 0x180) /* IRQx polarity */
66
67
68#define SDK7780_NR_IRL 15
69/* IDE/ATA interrupt */
70#define IRQ_CFCARD 14
71/* SMC interrupt */
72#define IRQ_ETHERNET 6
73
74
75/* arch/sh/boards/renesas/sdk7780/irq.c */
76void init_sdk7780_IRQ(void);
77
78#define __IO_PREFIX sdk7780
79#include <asm/io_generic.h>
80
81#endif /* __ASM_SH_RENESAS_SDK7780_H */
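
The SDK7780 FPGA registers defined above are plain memory-mapped locations, so board setup code can reach them with the ordinary SuperH ctrl_in*/ctrl_out* accessors. A hedged sketch (the function name is illustrative and the 16-bit register width is an assumption):

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/sdk7780.h>

static void __init sdk7780_fpga_demo(void)
{
	/* report the FPGA version and light the low eight user LEDs */
	printk(KERN_INFO "SDK7780 FPGA version: 0x%04x\n",
	       ctrl_inw(FPGA_FPVERR));
	ctrl_outw(0x00ff, PA_LED);
}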
diff --git a/include/asm-sh/sections.h b/include/asm-sh/sections.h
index bd9cbc967c2a..8f8f4ad400df 100644
--- a/include/asm-sh/sections.h
+++ b/include/asm-sh/sections.h
@@ -4,6 +4,7 @@
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern long __machvec_start, __machvec_end; 6extern long __machvec_start, __machvec_end;
7extern char __uncached_start, __uncached_end;
7extern char _ebss[]; 8extern char _ebss[];
8 9
9#endif /* __ASM_SH_SECTIONS_H */ 10#endif /* __ASM_SH_SECTIONS_H */
diff --git a/include/asm-sh/sigcontext.h b/include/asm-sh/sigcontext.h
index eb8effba2e80..8ce1435bc0bf 100644
--- a/include/asm-sh/sigcontext.h
+++ b/include/asm-sh/sigcontext.h
@@ -4,6 +4,18 @@
4struct sigcontext { 4struct sigcontext {
5 unsigned long oldmask; 5 unsigned long oldmask;
6 6
7#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
8 /* CPU registers */
9 unsigned long long sc_regs[63];
10 unsigned long long sc_tregs[8];
11 unsigned long long sc_pc;
12 unsigned long long sc_sr;
13
14 /* FPU registers */
15 unsigned long long sc_fpregs[32];
16 unsigned int sc_fpscr;
17 unsigned int sc_fpvalid;
18#else
7 /* CPU registers */ 19 /* CPU registers */
8 unsigned long sc_regs[16]; 20 unsigned long sc_regs[16];
9 unsigned long sc_pc; 21 unsigned long sc_pc;
@@ -13,7 +25,8 @@ struct sigcontext {
13 unsigned long sc_mach; 25 unsigned long sc_mach;
14 unsigned long sc_macl; 26 unsigned long sc_macl;
15 27
16#if defined(__SH4__) || defined(CONFIG_CPU_SH4) 28#if defined(__SH4__) || defined(CONFIG_CPU_SH4) || \
29 defined(__SH2A__) || defined(CONFIG_CPU_SH2A)
17 /* FPU registers */ 30 /* FPU registers */
18 unsigned long sc_fpregs[16]; 31 unsigned long sc_fpregs[16];
19 unsigned long sc_xfpregs[16]; 32 unsigned long sc_xfpregs[16];
@@ -21,6 +34,7 @@ struct sigcontext {
21 unsigned int sc_fpul; 34 unsigned int sc_fpul;
22 unsigned int sc_ownedfp; 35 unsigned int sc_ownedfp;
23#endif 36#endif
37#endif
24}; 38};
25 39
26#endif /* __ASM_SH_SIGCONTEXT_H */ 40#endif /* __ASM_SH_SIGCONTEXT_H */
diff --git a/include/asm-sh/spi.h b/include/asm-sh/spi.h
new file mode 100644
index 000000000000..e96f5b0953c8
--- /dev/null
+++ b/include/asm-sh/spi.h
@@ -0,0 +1,13 @@
1#ifndef __ASM_SPI_H__
2#define __ASM_SPI_H__
3
4struct sh_spi_info;
5
6struct sh_spi_info {
7 int bus_num;
8 int num_chipselect;
9
10 void (*chip_select)(struct sh_spi_info *spi, int cs, int state);
11};
12
13#endif /* __ASM_SPI_H__ */
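
struct sh_spi_info is platform data for the SuperH SPI master drivers: the board supplies the bus number, the number of chip selects, and an optional chip_select() callback invoked around transfers. A board-side sketch, assuming gpiolib's gpio_set_value() is available; the GPIO number, polarity convention, and names are hypothetical:

#include <linux/gpio.h>
#include <asm/spi.h>

#define DEMO_CS_GPIO	5	/* hypothetical chip-select line */

static void demo_chip_select(struct sh_spi_info *spi, int cs, int state)
{
	/* drive an active-low chip-select pin for device 'cs' */
	gpio_set_value(DEMO_CS_GPIO, !state);
}

static struct sh_spi_info demo_spi_info = {
	.bus_num	= 0,
	.num_chipselect	= 1,
	.chip_select	= demo_chip_select,
};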
diff --git a/include/asm-sh/stat.h b/include/asm-sh/stat.h
index 6d6ad26e3a2a..e1810cc6e3da 100644
--- a/include/asm-sh/stat.h
+++ b/include/asm-sh/stat.h
@@ -15,6 +15,66 @@ struct __old_kernel_stat {
15 unsigned long st_ctime; 15 unsigned long st_ctime;
16}; 16};
17 17
18#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
19struct stat {
20 unsigned short st_dev;
21 unsigned short __pad1;
22 unsigned long st_ino;
23 unsigned short st_mode;
24 unsigned short st_nlink;
25 unsigned short st_uid;
26 unsigned short st_gid;
27 unsigned short st_rdev;
28 unsigned short __pad2;
29 unsigned long st_size;
30 unsigned long st_blksize;
31 unsigned long st_blocks;
32 unsigned long st_atime;
33 unsigned long st_atime_nsec;
34 unsigned long st_mtime;
35 unsigned long st_mtime_nsec;
36 unsigned long st_ctime;
37 unsigned long st_ctime_nsec;
38 unsigned long __unused4;
39 unsigned long __unused5;
40};
41
42/* This matches struct stat64 in glibc2.1, hence the absolutely
43 * insane amounts of padding around dev_t's.
44 */
45struct stat64 {
46 unsigned short st_dev;
47 unsigned char __pad0[10];
48
49 unsigned long st_ino;
50 unsigned int st_mode;
51 unsigned int st_nlink;
52
53 unsigned long st_uid;
54 unsigned long st_gid;
55
56 unsigned short st_rdev;
57 unsigned char __pad3[10];
58
59 long long st_size;
60 unsigned long st_blksize;
61
62	unsigned long	st_blocks;	/* Number of 512-byte blocks allocated. */
63 unsigned long __pad4; /* future possible st_blocks high bits */
64
65 unsigned long st_atime;
66 unsigned long st_atime_nsec;
67
68 unsigned long st_mtime;
69 unsigned long st_mtime_nsec;
70
71 unsigned long st_ctime;
72 unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
73
74 unsigned long __unused1;
75 unsigned long __unused2;
76};
77#else
18struct stat { 78struct stat {
19 unsigned long st_dev; 79 unsigned long st_dev;
20 unsigned long st_ino; 80 unsigned long st_ino;
@@ -67,11 +127,12 @@ struct stat64 {
67 unsigned long st_mtime_nsec; 127 unsigned long st_mtime_nsec;
68 128
69 unsigned long st_ctime; 129 unsigned long st_ctime;
70 unsigned long st_ctime_nsec; 130 unsigned long st_ctime_nsec;
71 131
72 unsigned long long st_ino; 132 unsigned long long st_ino;
73}; 133};
74 134
75#define STAT_HAVE_NSEC 1 135#define STAT_HAVE_NSEC 1
136#endif
76 137
77#endif /* __ASM_SH_STAT_H */ 138#endif /* __ASM_SH_STAT_H */
diff --git a/include/asm-sh/string.h b/include/asm-sh/string.h
index 55f8db6bc1d7..8c1ea21dc0ae 100644
--- a/include/asm-sh/string.h
+++ b/include/asm-sh/string.h
@@ -1,131 +1,5 @@
1#ifndef __ASM_SH_STRING_H 1#ifdef CONFIG_SUPERH32
2#define __ASM_SH_STRING_H 2# include "string_32.h"
3 3#else
4#ifdef __KERNEL__ 4# include "string_64.h"
5 5#endif
6/*
7 * Copyright (C) 1999 Niibe Yutaka
8 * But consider these trivial functions to be public domain.
9 */
10
11#define __HAVE_ARCH_STRCPY
12static inline char *strcpy(char *__dest, const char *__src)
13{
14 register char *__xdest = __dest;
15 unsigned long __dummy;
16
17 __asm__ __volatile__("1:\n\t"
18 "mov.b @%1+, %2\n\t"
19 "mov.b %2, @%0\n\t"
20 "cmp/eq #0, %2\n\t"
21 "bf/s 1b\n\t"
22 " add #1, %0\n\t"
23 : "=r" (__dest), "=r" (__src), "=&z" (__dummy)
24 : "0" (__dest), "1" (__src)
25 : "memory", "t");
26
27 return __xdest;
28}
29
30#define __HAVE_ARCH_STRNCPY
31static inline char *strncpy(char *__dest, const char *__src, size_t __n)
32{
33 register char *__xdest = __dest;
34 unsigned long __dummy;
35
36 if (__n == 0)
37 return __xdest;
38
39 __asm__ __volatile__(
40 "1:\n"
41 "mov.b @%1+, %2\n\t"
42 "mov.b %2, @%0\n\t"
43 "cmp/eq #0, %2\n\t"
44 "bt/s 2f\n\t"
45 " cmp/eq %5,%1\n\t"
46 "bf/s 1b\n\t"
47 " add #1, %0\n"
48 "2:"
49 : "=r" (__dest), "=r" (__src), "=&z" (__dummy)
50 : "0" (__dest), "1" (__src), "r" (__src+__n)
51 : "memory", "t");
52
53 return __xdest;
54}
55
56#define __HAVE_ARCH_STRCMP
57static inline int strcmp(const char *__cs, const char *__ct)
58{
59 register int __res;
60 unsigned long __dummy;
61
62 __asm__ __volatile__(
63 "mov.b @%1+, %3\n"
64 "1:\n\t"
65 "mov.b @%0+, %2\n\t"
66 "cmp/eq #0, %3\n\t"
67 "bt 2f\n\t"
68 "cmp/eq %2, %3\n\t"
69 "bt/s 1b\n\t"
70 " mov.b @%1+, %3\n\t"
71 "add #-2, %1\n\t"
72 "mov.b @%1, %3\n\t"
73 "sub %3, %2\n"
74 "2:"
75 : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&z" (__dummy)
76 : "0" (__cs), "1" (__ct)
77 : "t");
78
79 return __res;
80}
81
82#define __HAVE_ARCH_STRNCMP
83static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
84{
85 register int __res;
86 unsigned long __dummy;
87
88 if (__n == 0)
89 return 0;
90
91 __asm__ __volatile__(
92 "mov.b @%1+, %3\n"
93 "1:\n\t"
94 "mov.b @%0+, %2\n\t"
95 "cmp/eq %6, %0\n\t"
96 "bt/s 2f\n\t"
97 " cmp/eq #0, %3\n\t"
98 "bt/s 3f\n\t"
99 " cmp/eq %3, %2\n\t"
100 "bt/s 1b\n\t"
101 " mov.b @%1+, %3\n\t"
102 "add #-2, %1\n\t"
103 "mov.b @%1, %3\n"
104 "2:\n\t"
105 "sub %3, %2\n"
106 "3:"
107 :"=r" (__cs), "=r" (__ct), "=&r" (__res), "=&z" (__dummy)
108 : "0" (__cs), "1" (__ct), "r" (__cs+__n)
109 : "t");
110
111 return __res;
112}
113
114#define __HAVE_ARCH_MEMSET
115extern void *memset(void *__s, int __c, size_t __count);
116
117#define __HAVE_ARCH_MEMCPY
118extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
119
120#define __HAVE_ARCH_MEMMOVE
121extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
122
123#define __HAVE_ARCH_MEMCHR
124extern void *memchr(const void *__s, int __c, size_t __n);
125
126#define __HAVE_ARCH_STRLEN
127extern size_t strlen(const char *);
128
129#endif /* __KERNEL__ */
130
131#endif /* __ASM_SH_STRING_H */
diff --git a/include/asm-sh/string_32.h b/include/asm-sh/string_32.h
new file mode 100644
index 000000000000..55f8db6bc1d7
--- /dev/null
+++ b/include/asm-sh/string_32.h
@@ -0,0 +1,131 @@
1#ifndef __ASM_SH_STRING_H
2#define __ASM_SH_STRING_H
3
4#ifdef __KERNEL__
5
6/*
7 * Copyright (C) 1999 Niibe Yutaka
8 * But consider these trivial functions to be public domain.
9 */
10
11#define __HAVE_ARCH_STRCPY
12static inline char *strcpy(char *__dest, const char *__src)
13{
14 register char *__xdest = __dest;
15 unsigned long __dummy;
16
17 __asm__ __volatile__("1:\n\t"
18 "mov.b @%1+, %2\n\t"
19 "mov.b %2, @%0\n\t"
20 "cmp/eq #0, %2\n\t"
21 "bf/s 1b\n\t"
22 " add #1, %0\n\t"
23 : "=r" (__dest), "=r" (__src), "=&z" (__dummy)
24 : "0" (__dest), "1" (__src)
25 : "memory", "t");
26
27 return __xdest;
28}
29
30#define __HAVE_ARCH_STRNCPY
31static inline char *strncpy(char *__dest, const char *__src, size_t __n)
32{
33 register char *__xdest = __dest;
34 unsigned long __dummy;
35
36 if (__n == 0)
37 return __xdest;
38
39 __asm__ __volatile__(
40 "1:\n"
41 "mov.b @%1+, %2\n\t"
42 "mov.b %2, @%0\n\t"
43 "cmp/eq #0, %2\n\t"
44 "bt/s 2f\n\t"
45 " cmp/eq %5,%1\n\t"
46 "bf/s 1b\n\t"
47 " add #1, %0\n"
48 "2:"
49 : "=r" (__dest), "=r" (__src), "=&z" (__dummy)
50 : "0" (__dest), "1" (__src), "r" (__src+__n)
51 : "memory", "t");
52
53 return __xdest;
54}
55
56#define __HAVE_ARCH_STRCMP
57static inline int strcmp(const char *__cs, const char *__ct)
58{
59 register int __res;
60 unsigned long __dummy;
61
62 __asm__ __volatile__(
63 "mov.b @%1+, %3\n"
64 "1:\n\t"
65 "mov.b @%0+, %2\n\t"
66 "cmp/eq #0, %3\n\t"
67 "bt 2f\n\t"
68 "cmp/eq %2, %3\n\t"
69 "bt/s 1b\n\t"
70 " mov.b @%1+, %3\n\t"
71 "add #-2, %1\n\t"
72 "mov.b @%1, %3\n\t"
73 "sub %3, %2\n"
74 "2:"
75 : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&z" (__dummy)
76 : "0" (__cs), "1" (__ct)
77 : "t");
78
79 return __res;
80}
81
82#define __HAVE_ARCH_STRNCMP
83static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
84{
85 register int __res;
86 unsigned long __dummy;
87
88 if (__n == 0)
89 return 0;
90
91 __asm__ __volatile__(
92 "mov.b @%1+, %3\n"
93 "1:\n\t"
94 "mov.b @%0+, %2\n\t"
95 "cmp/eq %6, %0\n\t"
96 "bt/s 2f\n\t"
97 " cmp/eq #0, %3\n\t"
98 "bt/s 3f\n\t"
99 " cmp/eq %3, %2\n\t"
100 "bt/s 1b\n\t"
101 " mov.b @%1+, %3\n\t"
102 "add #-2, %1\n\t"
103 "mov.b @%1, %3\n"
104 "2:\n\t"
105 "sub %3, %2\n"
106 "3:"
107 :"=r" (__cs), "=r" (__ct), "=&r" (__res), "=&z" (__dummy)
108 : "0" (__cs), "1" (__ct), "r" (__cs+__n)
109 : "t");
110
111 return __res;
112}
113
114#define __HAVE_ARCH_MEMSET
115extern void *memset(void *__s, int __c, size_t __count);
116
117#define __HAVE_ARCH_MEMCPY
118extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
119
120#define __HAVE_ARCH_MEMMOVE
121extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
122
123#define __HAVE_ARCH_MEMCHR
124extern void *memchr(const void *__s, int __c, size_t __n);
125
126#define __HAVE_ARCH_STRLEN
127extern size_t strlen(const char *);
128
129#endif /* __KERNEL__ */
130
131#endif /* __ASM_SH_STRING_H */
diff --git a/include/asm-sh/string_64.h b/include/asm-sh/string_64.h
new file mode 100644
index 000000000000..aa1fef229c78
--- /dev/null
+++ b/include/asm-sh/string_64.h
@@ -0,0 +1,17 @@
1#ifndef __ASM_SH_STRING_64_H
2#define __ASM_SH_STRING_64_H
3
4/*
5 * include/asm-sh/string_64.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14#define __HAVE_ARCH_MEMCPY
15extern void *memcpy(void *dest, const void *src, size_t count);
16
17#endif /* __ASM_SH_STRING_64_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 4faa2fb88616..772cd1a0a674 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -12,60 +12,9 @@
12#include <asm/types.h> 12#include <asm/types.h>
13#include <asm/ptrace.h> 13#include <asm/ptrace.h>
14 14
15struct task_struct *__switch_to(struct task_struct *prev, 15#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
16 struct task_struct *next);
17 16
18#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ 17#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
19/*
20 * switch_to() should switch tasks to task nr n, first
21 */
22
23#define switch_to(prev, next, last) do { \
24 struct task_struct *__last; \
25 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
26 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
27 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
28 register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
29 register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
30 register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \
31 __asm__ __volatile__ (".balign 4\n\t" \
32 "stc.l gbr, @-r15\n\t" \
33 "sts.l pr, @-r15\n\t" \
34 "mov.l r8, @-r15\n\t" \
35 "mov.l r9, @-r15\n\t" \
36 "mov.l r10, @-r15\n\t" \
37 "mov.l r11, @-r15\n\t" \
38 "mov.l r12, @-r15\n\t" \
39 "mov.l r13, @-r15\n\t" \
40 "mov.l r14, @-r15\n\t" \
41 "mov.l r15, @r1 ! save SP\n\t" \
42 "mov.l @r6, r15 ! change to new stack\n\t" \
43 "mova 1f, %0\n\t" \
44 "mov.l %0, @r2 ! save PC\n\t" \
45 "mov.l 2f, %0\n\t" \
46 "jmp @%0 ! call __switch_to\n\t" \
47 " lds r7, pr ! with return to new PC\n\t" \
48 ".balign 4\n" \
49 "2:\n\t" \
50 ".long __switch_to\n" \
51 "1:\n\t" \
52 "mov.l @r15+, r14\n\t" \
53 "mov.l @r15+, r13\n\t" \
54 "mov.l @r15+, r12\n\t" \
55 "mov.l @r15+, r11\n\t" \
56 "mov.l @r15+, r10\n\t" \
57 "mov.l @r15+, r9\n\t" \
58 "mov.l @r15+, r8\n\t" \
59 "lds.l @r15+, pr\n\t" \
60 "ldc.l @r15+, gbr\n\t" \
61 : "=z" (__last) \
62 : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
63 "r" (__ts5), "r" (__ts6), "r" (__ts7) \
64 : "r3", "t"); \
65 last = __last; \
66} while (0)
67
68#ifdef CONFIG_CPU_SH4A
69#define __icbi() \ 18#define __icbi() \
70{ \ 19{ \
71 unsigned long __addr; \ 20 unsigned long __addr; \
@@ -91,7 +40,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
91 * Historically we have only done this type of barrier for the MMUCR, but 40 * Historically we have only done this type of barrier for the MMUCR, but
92 * it's also necessary for the CCR, so we make it generic here instead. 41 * it's also necessary for the CCR, so we make it generic here instead.
93 */ 42 */
94#ifdef CONFIG_CPU_SH4A 43#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
95#define mb() __asm__ __volatile__ ("synco": : :"memory") 44#define mb() __asm__ __volatile__ ("synco": : :"memory")
96#define rmb() mb() 45#define rmb() mb()
97#define wmb() __asm__ __volatile__ ("synco": : :"memory") 46#define wmb() __asm__ __volatile__ ("synco": : :"memory")
@@ -119,63 +68,11 @@ struct task_struct *__switch_to(struct task_struct *prev,
119 68
120#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 69#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
121 70
122/* 71#ifdef CONFIG_GUSA_RB
123 * Jump to P2 area. 72#include <asm/cmpxchg-grb.h>
124 * When handling TLB or caches, we need to do it from P2 area. 73#else
125 */ 74#include <asm/cmpxchg-irq.h>
126#define jump_to_P2() \ 75#endif
127do { \
128 unsigned long __dummy; \
129 __asm__ __volatile__( \
130 "mov.l 1f, %0\n\t" \
131 "or %1, %0\n\t" \
132 "jmp @%0\n\t" \
133 " nop\n\t" \
134 ".balign 4\n" \
135 "1: .long 2f\n" \
136 "2:" \
137 : "=&r" (__dummy) \
138 : "r" (0x20000000)); \
139} while (0)
140
141/*
142 * Back to P1 area.
143 */
144#define back_to_P1() \
145do { \
146 unsigned long __dummy; \
147 ctrl_barrier(); \
148 __asm__ __volatile__( \
149 "mov.l 1f, %0\n\t" \
150 "jmp @%0\n\t" \
151 " nop\n\t" \
152 ".balign 4\n" \
153 "1: .long 2f\n" \
154 "2:" \
155 : "=&r" (__dummy)); \
156} while (0)
157
158static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
159{
160 unsigned long flags, retval;
161
162 local_irq_save(flags);
163 retval = *m;
164 *m = val;
165 local_irq_restore(flags);
166 return retval;
167}
168
169static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
170{
171 unsigned long flags, retval;
172
173 local_irq_save(flags);
174 retval = *m;
175 *m = val & 0xff;
176 local_irq_restore(flags);
177 return retval;
178}
179 76
180extern void __xchg_called_with_bad_pointer(void); 77extern void __xchg_called_with_bad_pointer(void);
181 78
@@ -202,20 +99,6 @@ extern void __xchg_called_with_bad_pointer(void);
202#define xchg(ptr,x) \ 99#define xchg(ptr,x) \
203 ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr)))) 100 ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
204 101
205static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
206 unsigned long new)
207{
208 __u32 retval;
209 unsigned long flags;
210
211 local_irq_save(flags);
212 retval = *m;
213 if (retval == old)
214 *m = new;
215 local_irq_restore(flags); /* implies memory barrier */
216 return retval;
217}
218
219/* This function doesn't exist, so you'll get a linker error 102/* This function doesn't exist, so you'll get a linker error
220 * if something tries to do an invalid cmpxchg(). */ 103 * if something tries to do an invalid cmpxchg(). */
221extern void __cmpxchg_called_with_bad_pointer(void); 104extern void __cmpxchg_called_with_bad_pointer(void);
@@ -255,10 +138,14 @@ static inline void *set_exception_table_evt(unsigned int evt, void *handler)
255 */ 138 */
256#ifdef CONFIG_CPU_SH2A 139#ifdef CONFIG_CPU_SH2A
257extern unsigned int instruction_size(unsigned int insn); 140extern unsigned int instruction_size(unsigned int insn);
258#else 141#elif defined(CONFIG_SUPERH32)
259#define instruction_size(insn) (2) 142#define instruction_size(insn) (2)
143#else
144#define instruction_size(insn) (4)
260#endif 145#endif
261 146
147extern unsigned long cached_to_uncached;
148
262/* XXX 149/* XXX
263 * disable hlt during certain critical i/o operations 150 * disable hlt during certain critical i/o operations
264 */ 151 */
@@ -270,13 +157,35 @@ void default_idle(void);
270void per_cpu_trap_init(void); 157void per_cpu_trap_init(void);
271 158
272asmlinkage void break_point_trap(void); 159asmlinkage void break_point_trap(void);
273asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5, 160
274 unsigned long r6, unsigned long r7, 161#ifdef CONFIG_SUPERH32
275 struct pt_regs __regs); 162#define BUILD_TRAP_HANDLER(name) \
276asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5, 163asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
277 unsigned long r6, unsigned long r7, 164 unsigned long r6, unsigned long r7, \
278 struct pt_regs __regs); 165 struct pt_regs __regs)
166
167#define TRAP_HANDLER_DECL \
168 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \
169 unsigned int vec = regs->tra; \
170 (void)vec;
171#else
172#define BUILD_TRAP_HANDLER(name) \
173asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
174#define TRAP_HANDLER_DECL
175#endif
176
177BUILD_TRAP_HANDLER(address_error);
178BUILD_TRAP_HANDLER(debug);
179BUILD_TRAP_HANDLER(bug);
180BUILD_TRAP_HANDLER(fpu_error);
181BUILD_TRAP_HANDLER(fpu_state_restore);
279 182
280#define arch_align_stack(x) (x) 183#define arch_align_stack(x) (x)
281 184
185#ifdef CONFIG_SUPERH32
186# include "system_32.h"
187#else
188# include "system_64.h"
189#endif
190
282#endif 191#endif
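
BUILD_TRAP_HANDLER()/TRAP_HANDLER_DECL let one handler body serve both CPU families: on SUPERH32 the arguments arrive in r4-r7 and TRAP_HANDLER_DECL recovers regs and the trap vector from the stack frame, while on sh64 the vector and regs are passed directly. A sketch of how a handler is written against this interface (the body is illustrative, not the actual arch/sh implementation):

#include <linux/sched.h>
#include <asm/system.h>
#include <asm/ptrace.h>

BUILD_TRAP_HANDLER(debug)
{
	TRAP_HANDLER_DECL;

	/* 'regs' and 'vec' are usable here regardless of CPU family */
	force_sig(SIGTRAP, current);
}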
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
new file mode 100644
index 000000000000..7ff08d956ba8
--- /dev/null
+++ b/include/asm-sh/system_32.h
@@ -0,0 +1,99 @@
1#ifndef __ASM_SH_SYSTEM_32_H
2#define __ASM_SH_SYSTEM_32_H
3
4#include <linux/types.h>
5
6struct task_struct *__switch_to(struct task_struct *prev,
7 struct task_struct *next);
8
9/*
10 * switch_to() should switch tasks to task nr n, first
11 */
12#define switch_to(prev, next, last) \
13do { \
14 register u32 *__ts1 __asm__ ("r1") = (u32 *)&prev->thread.sp; \
15 register u32 *__ts2 __asm__ ("r2") = (u32 *)&prev->thread.pc; \
16 register u32 *__ts4 __asm__ ("r4") = (u32 *)prev; \
17 register u32 *__ts5 __asm__ ("r5") = (u32 *)next; \
18 register u32 *__ts6 __asm__ ("r6") = (u32 *)&next->thread.sp; \
19 register u32 __ts7 __asm__ ("r7") = next->thread.pc; \
20 struct task_struct *__last; \
21 \
22 __asm__ __volatile__ ( \
23 ".balign 4\n\t" \
24 "stc.l gbr, @-r15\n\t" \
25 "sts.l pr, @-r15\n\t" \
26 "mov.l r8, @-r15\n\t" \
27 "mov.l r9, @-r15\n\t" \
28 "mov.l r10, @-r15\n\t" \
29 "mov.l r11, @-r15\n\t" \
30 "mov.l r12, @-r15\n\t" \
31 "mov.l r13, @-r15\n\t" \
32 "mov.l r14, @-r15\n\t" \
33 "mov.l r15, @r1\t! save SP\n\t" \
34 "mov.l @r6, r15\t! change to new stack\n\t" \
35 "mova 1f, %0\n\t" \
36 "mov.l %0, @r2\t! save PC\n\t" \
37 "mov.l 2f, %0\n\t" \
38 "jmp @%0\t! call __switch_to\n\t" \
39 " lds r7, pr\t! with return to new PC\n\t" \
40 ".balign 4\n" \
41 "2:\n\t" \
42 ".long __switch_to\n" \
43 "1:\n\t" \
44 "mov.l @r15+, r14\n\t" \
45 "mov.l @r15+, r13\n\t" \
46 "mov.l @r15+, r12\n\t" \
47 "mov.l @r15+, r11\n\t" \
48 "mov.l @r15+, r10\n\t" \
49 "mov.l @r15+, r9\n\t" \
50 "mov.l @r15+, r8\n\t" \
51 "lds.l @r15+, pr\n\t" \
52 "ldc.l @r15+, gbr\n\t" \
53 : "=z" (__last) \
54 : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
55 "r" (__ts5), "r" (__ts6), "r" (__ts7) \
56 : "r3", "t"); \
57 \
58 last = __last; \
59} while (0)
60
61#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text")))
62
63/*
64 * Jump to uncached area.
65 * When handling TLB or caches, we need to do it from an uncached area.
66 */
67#define jump_to_uncached() \
68do { \
69 unsigned long __dummy; \
70 \
71 __asm__ __volatile__( \
72 "mova 1f, %0\n\t" \
73 "add %1, %0\n\t" \
74 "jmp @%0\n\t" \
75 " nop\n\t" \
76 ".balign 4\n" \
77 "1:" \
78 : "=&z" (__dummy) \
79 : "r" (cached_to_uncached)); \
80} while (0)
81
82/*
83 * Back to cached area.
84 */
85#define back_to_cached() \
86do { \
87 unsigned long __dummy; \
88 ctrl_barrier(); \
89 __asm__ __volatile__( \
90 "mov.l 1f, %0\n\t" \
91 "jmp @%0\n\t" \
92 " nop\n\t" \
93 ".balign 4\n" \
94 "1: .long 2f\n" \
95 "2:" \
96 : "=&r" (__dummy)); \
97} while (0)
98
99#endif /* __ASM_SH_SYSTEM_32_H */
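
jump_to_uncached()/back_to_cached() replace the old fixed P2-segment jump: the caller is placed in .uncached.text via __uses_jump_to_uncached and the branch target is computed from cached_to_uncached, so the same code works where a hard-coded 0x20000000 offset would not. A hedged sketch of the usual pattern for touching the cache controller (CCR and CCR_CACHE_ENABLE are assumed to come from the CPU's cache header; flushing and locking are omitted):

#include <asm/system.h>
#include <asm/io.h>
#include <asm/cache.h>

static void __uses_jump_to_uncached cache_enable_demo(void)
{
	unsigned long ccr;

	jump_to_uncached();	/* run the following accesses from the uncached alias */
	ccr = ctrl_inl(CCR);
	ctrl_outl(ccr | CCR_CACHE_ENABLE, CCR);
	back_to_cached();	/* resume through the cached mapping */
}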
diff --git a/include/asm-sh/system_64.h b/include/asm-sh/system_64.h
new file mode 100644
index 000000000000..943acf5ea07c
--- /dev/null
+++ b/include/asm-sh/system_64.h
@@ -0,0 +1,40 @@
1#ifndef __ASM_SH_SYSTEM_64_H
2#define __ASM_SH_SYSTEM_64_H
3
4/*
5 * include/asm-sh/system_64.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 * Copyright (C) 2003 Paul Mundt
9 * Copyright (C) 2004 Richard Curnow
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 */
15#include <asm/processor.h>
16
17/*
18 * switch_to() should switch tasks to task nr n, first
19 */
20struct task_struct *sh64_switch_to(struct task_struct *prev,
21 struct thread_struct *prev_thread,
22 struct task_struct *next,
23 struct thread_struct *next_thread);
24
25#define switch_to(prev,next,last) \
26do { \
27 if (last_task_used_math != next) { \
28 struct pt_regs *regs = next->thread.uregs; \
29 if (regs) regs->sr |= SR_FD; \
30 } \
31 last = sh64_switch_to(prev, &prev->thread, next, \
32 &next->thread); \
33} while (0)
34
35#define __uses_jump_to_uncached
36
37#define jump_to_uncached() do { } while (0)
38#define back_to_cached() do { } while (0)
39
40#endif /* __ASM_SH_SYSTEM_64_H */
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 1f7e1deb8d92..c6577d3dc46d 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -74,8 +74,10 @@ register unsigned long current_stack_pointer asm("r15") __attribute_used__;
74static inline struct thread_info *current_thread_info(void) 74static inline struct thread_info *current_thread_info(void)
75{ 75{
76 struct thread_info *ti; 76 struct thread_info *ti;
77#ifdef CONFIG_CPU_HAS_SR_RB 77#if defined(CONFIG_SUPERH64)
78 __asm__("stc r7_bank, %0" : "=r" (ti)); 78 __asm__ __volatile__ ("getcon cr17, %0" : "=r" (ti));
79#elif defined(CONFIG_CPU_HAS_SR_RB)
80 __asm__ __volatile__ ("stc r7_bank, %0" : "=r" (ti));
79#else 81#else
80 unsigned long __dummy; 82 unsigned long __dummy;
81 83
@@ -111,6 +113,7 @@ static inline struct thread_info *current_thread_info(void)
111#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 113#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
112#define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */ 114#define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */
113#define TIF_SINGLESTEP 4 /* singlestepping active */ 115#define TIF_SINGLESTEP 4 /* singlestepping active */
116#define TIF_SYSCALL_AUDIT 5
114#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 117#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
115#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 118#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
116#define TIF_MEMDIE 18 119#define TIF_MEMDIE 18
@@ -121,6 +124,7 @@ static inline struct thread_info *current_thread_info(void)
121#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 124#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
122#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 125#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
123#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) 126#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
127#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
124#define _TIF_USEDFPU (1<<TIF_USEDFPU) 128#define _TIF_USEDFPU (1<<TIF_USEDFPU)
125#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 129#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
126#define _TIF_FREEZE (1<<TIF_FREEZE) 130#define _TIF_FREEZE (1<<TIF_FREEZE)
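
TIF_SYSCALL_AUDIT sits alongside the existing work flags so the syscall entry path can decide cheaply whether to call into the audit code. A minimal sketch of the C-level check (illustrative only; the real entry code tests the flag from assembly):

#include <linux/thread_info.h>

static inline int syscall_audit_pending(void)
{
	return test_thread_flag(TIF_SYSCALL_AUDIT);
}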
diff --git a/include/asm-sh/tlb.h b/include/asm-sh/tlb.h
index 53d185bcf872..56ad1fb888a2 100644
--- a/include/asm-sh/tlb.h
+++ b/include/asm-sh/tlb.h
@@ -1,6 +1,12 @@
1#ifndef __ASM_SH_TLB_H 1#ifndef __ASM_SH_TLB_H
2#define __ASM_SH_TLB_H 2#define __ASM_SH_TLB_H
3 3
4#ifdef CONFIG_SUPERH64
5# include "tlb_64.h"
6#endif
7
8#ifndef __ASSEMBLY__
9
4#define tlb_start_vma(tlb, vma) \ 10#define tlb_start_vma(tlb, vma) \
5 flush_cache_range(vma, vma->vm_start, vma->vm_end) 11 flush_cache_range(vma, vma->vm_start, vma->vm_end)
6 12
@@ -15,4 +21,6 @@
15#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) 21#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
16 22
17#include <asm-generic/tlb.h> 23#include <asm-generic/tlb.h>
18#endif 24
25#endif /* __ASSEMBLY__ */
26#endif /* __ASM_SH_TLB_H */
diff --git a/include/asm-sh/tlb_64.h b/include/asm-sh/tlb_64.h
new file mode 100644
index 000000000000..0308e05fc57b
--- /dev/null
+++ b/include/asm-sh/tlb_64.h
@@ -0,0 +1,69 @@
1/*
2 * include/asm-sh/tlb_64.h
3 *
4 * Copyright (C) 2003 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#ifndef __ASM_SH_TLB_64_H
11#define __ASM_SH_TLB_64_H
12
13/* ITLB defines */
14#define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */
15#define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */
16
17/* DTLB defines */
18#define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */
19#define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */
20
21#ifndef __ASSEMBLY__
22
23/**
24 * for_each_dtlb_entry
25 *
26 * @tlb: TLB entry
27 *
28 * Iterate over free (non-wired) DTLB entries
29 */
30#define for_each_dtlb_entry(tlb) \
31 for (tlb = cpu_data->dtlb.first; \
32 tlb <= cpu_data->dtlb.last; \
33 tlb += cpu_data->dtlb.step)
34
35/**
36 * for_each_itlb_entry
37 *
38 * @tlb: TLB entry
39 *
40 * Iterate over free (non-wired) ITLB entries
41 */
42#define for_each_itlb_entry(tlb) \
43 for (tlb = cpu_data->itlb.first; \
44 tlb <= cpu_data->itlb.last; \
45 tlb += cpu_data->itlb.step)
46
47/**
48 * __flush_tlb_slot
49 *
50 * @slot: Address of TLB slot.
51 *
52 * Flushes TLB slot @slot.
53 */
54static inline void __flush_tlb_slot(unsigned long long slot)
55{
56 __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
57}
58
59/* arch/sh64/mm/tlb.c */
60int sh64_tlb_init(void);
61unsigned long long sh64_next_free_dtlb_entry(void);
62unsigned long long sh64_get_wired_dtlb_entry(void);
63int sh64_put_wired_dtlb_entry(unsigned long long entry);
64void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
65 unsigned long asid, unsigned long paddr);
66void sh64_teardown_tlb_slot(unsigned long long config_addr);
67
68#endif /* __ASSEMBLY__ */
69#endif /* __ASM_SH_TLB_64_H */
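
The iterators and __flush_tlb_slot() give the sh64 TLB code a small, uniform vocabulary: walk the non-wired entries of either TLB and invalidate a slot by writing r63 (the always-zero register) into its configuration space. A sketch of how they compose, assuming the cpu_data dtlb/itlb descriptors have already been filled in by sh64_tlb_init():

#include <asm/tlb.h>
#include <asm/processor.h>

/* Invalidate every non-wired data and instruction TLB entry. */
static void flush_all_tlb_demo(void)
{
	unsigned long long entry;

	for_each_dtlb_entry(entry)
		__flush_tlb_slot(entry);

	for_each_itlb_entry(entry)
		__flush_tlb_slot(entry);
}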
diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h
index 7ba69d9707ef..a6e1d4126e67 100644
--- a/include/asm-sh/types.h
+++ b/include/asm-sh/types.h
@@ -52,6 +52,12 @@ typedef unsigned long long u64;
52 52
53typedef u32 dma_addr_t; 53typedef u32 dma_addr_t;
54 54
55#ifdef CONFIG_SUPERH32
56typedef u16 opcode_t;
57#else
58typedef u32 opcode_t;
59#endif
60
55#endif /* __ASSEMBLY__ */ 61#endif /* __ASSEMBLY__ */
56 62
57#endif /* __KERNEL__ */ 63#endif /* __KERNEL__ */
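
opcode_t captures the instruction-size difference directly in the type system: 16 bits on SUPERH32, 32 bits on sh64. Code that inspects instructions can then be written once; a hedged sketch (the helper name is made up for illustration):

#include <asm/types.h>
#include <asm/ptrace.h>

/* Fetch the opcode at the saved program counter, correctly sized per family. */
static inline opcode_t fetch_opcode(struct pt_regs *regs)
{
	return *(opcode_t *)instruction_pointer(regs);
}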
diff --git a/include/asm-sh/uaccess.h b/include/asm-sh/uaccess.h
index 77c391fa93d6..ff24ce95b238 100644
--- a/include/asm-sh/uaccess.h
+++ b/include/asm-sh/uaccess.h
@@ -1,563 +1,5 @@
1/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $ 1#ifdef CONFIG_SUPERH32
2 * 2# include "uaccess_32.h"
3 * User space memory access functions
4 *
5 * Copyright (C) 1999, 2002 Niibe Yutaka
6 * Copyright (C) 2003 Paul Mundt
7 *
8 * Based on:
9 * MIPS implementation version 1.15 by
10 * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
11 * and i386 version.
12 */
13#ifndef __ASM_SH_UACCESS_H
14#define __ASM_SH_UACCESS_H
15
16#include <linux/errno.h>
17#include <linux/sched.h>
18
19#define VERIFY_READ 0
20#define VERIFY_WRITE 1
21
22/*
23 * The fs value determines whether argument validity checking should be
24 * performed or not. If get_fs() == USER_DS, checking is performed, with
25 * get_fs() == KERNEL_DS, checking is bypassed.
26 *
27 * For historical reasons (Data Segment Register?), these macros are misnamed.
28 */
29
30#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
31
32#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL)
33#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
34
35#define segment_eq(a,b) ((a).seg == (b).seg)
36
37#define get_ds() (KERNEL_DS)
38
39#if !defined(CONFIG_MMU)
40/* NOMMU is always true */
41#define __addr_ok(addr) (1)
42
43static inline mm_segment_t get_fs(void)
44{
45 return USER_DS;
46}
47
48static inline void set_fs(mm_segment_t s)
49{
50}
51
52/*
53 * __access_ok: Check if address with size is OK or not.
54 *
55 * If we don't have an MMU (or if it's disabled) the only thing we really have
56 * to look out for is if the address resides somewhere outside of what
57 * available RAM we have.
58 *
59 * TODO: This check could probably also stand to be restricted somewhat more..
60 * though it still does the Right Thing(tm) for the time being.
61 */
62static inline int __access_ok(unsigned long addr, unsigned long size)
63{
64 return ((addr >= memory_start) && ((addr + size) < memory_end));
65}
66#else /* CONFIG_MMU */
67#define __addr_ok(addr) \
68 ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
69
70#define get_fs() (current_thread_info()->addr_limit)
71#define set_fs(x) (current_thread_info()->addr_limit = (x))
72
73/*
74 * __access_ok: Check if address with size is OK or not.
75 *
76 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
77 *
78 * sum := addr + size; carry? --> flag = true;
79 * if (sum >= addr_limit) flag = true;
80 */
81static inline int __access_ok(unsigned long addr, unsigned long size)
82{
83 unsigned long flag, sum;
84
85 __asm__("clrt\n\t"
86 "addc %3, %1\n\t"
87 "movt %0\n\t"
88 "cmp/hi %4, %1\n\t"
89 "rotcl %0"
90 :"=&r" (flag), "=r" (sum)
91 :"1" (addr), "r" (size),
92 "r" (current_thread_info()->addr_limit.seg)
93 :"t");
94 return flag == 0;
95
96}
97#endif /* CONFIG_MMU */
98
99static inline int access_ok(int type, const void __user *p, unsigned long size)
100{
101 unsigned long addr = (unsigned long)p;
102 return __access_ok(addr, size);
103}
104
105/*
106 * Uh, these should become the main single-value transfer routines ...
107 * They automatically use the right size if we just have the right
108 * pointer type ...
109 *
110 * As SuperH uses the same address space for kernel and user data, we
111 * can just do these as direct assignments.
112 *
113 * Careful to not
114 * (a) re-use the arguments for side effects (sizeof is ok)
115 * (b) require any knowledge of processes at this stage
116 */
117#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
118#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
119
120/*
121 * The "__xxx" versions do not do address space checking, useful when
122 * doing multiple accesses to the same area (the user has to do the
123 * checks by hand with "access_ok()")
124 */
125#define __put_user(x,ptr) \
126 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
127#define __get_user(x,ptr) \
128 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
129
130struct __large_struct { unsigned long buf[100]; };
131#define __m(x) (*(struct __large_struct __user *)(x))
132
133#define __get_user_size(x,ptr,size,retval) \
134do { \
135 retval = 0; \
136 __chk_user_ptr(ptr); \
137 switch (size) { \
138 case 1: \
139 __get_user_asm(x, ptr, retval, "b"); \
140 break; \
141 case 2: \
142 __get_user_asm(x, ptr, retval, "w"); \
143 break; \
144 case 4: \
145 __get_user_asm(x, ptr, retval, "l"); \
146 break; \
147 default: \
148 __get_user_unknown(); \
149 break; \
150 } \
151} while (0)
152
153#define __get_user_nocheck(x,ptr,size) \
154({ \
155 long __gu_err, __gu_val; \
156 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
157 (x) = (__typeof__(*(ptr)))__gu_val; \
158 __gu_err; \
159})
160
161#ifdef CONFIG_MMU
162#define __get_user_check(x,ptr,size) \
163({ \
164 long __gu_err, __gu_val; \
165 __chk_user_ptr(ptr); \
166 switch (size) { \
167 case 1: \
168 __get_user_1(__gu_val, (ptr), __gu_err); \
169 break; \
170 case 2: \
171 __get_user_2(__gu_val, (ptr), __gu_err); \
172 break; \
173 case 4: \
174 __get_user_4(__gu_val, (ptr), __gu_err); \
175 break; \
176 default: \
177 __get_user_unknown(); \
178 break; \
179 } \
180 \
181 (x) = (__typeof__(*(ptr)))__gu_val; \
182 __gu_err; \
183})
184
185#define __get_user_1(x,addr,err) ({ \
186__asm__("stc r7_bank, %1\n\t" \
187 "mov.l @(8,%1), %1\n\t" \
188 "and %2, %1\n\t" \
189 "cmp/pz %1\n\t" \
190 "bt/s 1f\n\t" \
191 " mov #0, %0\n\t" \
192 "0:\n" \
193 "mov #-14, %0\n\t" \
194 "bra 2f\n\t" \
195 " mov #0, %1\n" \
196 "1:\n\t" \
197 "mov.b @%2, %1\n\t" \
198 "extu.b %1, %1\n" \
199 "2:\n" \
200 ".section __ex_table,\"a\"\n\t" \
201 ".long 1b, 0b\n\t" \
202 ".previous" \
203 : "=&r" (err), "=&r" (x) \
204 : "r" (addr) \
205 : "t"); \
206})
207
208#define __get_user_2(x,addr,err) ({ \
209__asm__("stc r7_bank, %1\n\t" \
210 "mov.l @(8,%1), %1\n\t" \
211 "and %2, %1\n\t" \
212 "cmp/pz %1\n\t" \
213 "bt/s 1f\n\t" \
214 " mov #0, %0\n\t" \
215 "0:\n" \
216 "mov #-14, %0\n\t" \
217 "bra 2f\n\t" \
218 " mov #0, %1\n" \
219 "1:\n\t" \
220 "mov.w @%2, %1\n\t" \
221 "extu.w %1, %1\n" \
222 "2:\n" \
223 ".section __ex_table,\"a\"\n\t" \
224 ".long 1b, 0b\n\t" \
225 ".previous" \
226 : "=&r" (err), "=&r" (x) \
227 : "r" (addr) \
228 : "t"); \
229})
230
231#define __get_user_4(x,addr,err) ({ \
232__asm__("stc r7_bank, %1\n\t" \
233 "mov.l @(8,%1), %1\n\t" \
234 "and %2, %1\n\t" \
235 "cmp/pz %1\n\t" \
236 "bt/s 1f\n\t" \
237 " mov #0, %0\n\t" \
238 "0:\n" \
239 "mov #-14, %0\n\t" \
240 "bra 2f\n\t" \
241 " mov #0, %1\n" \
242 "1:\n\t" \
243 "mov.l @%2, %1\n\t" \
244 "2:\n" \
245 ".section __ex_table,\"a\"\n\t" \
246 ".long 1b, 0b\n\t" \
247 ".previous" \
248 : "=&r" (err), "=&r" (x) \
249 : "r" (addr) \
250 : "t"); \
251})
252#else /* CONFIG_MMU */
253#define __get_user_check(x,ptr,size) \
254({ \
255 long __gu_err, __gu_val; \
256 if (__access_ok((unsigned long)(ptr), (size))) { \
257 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
258 (x) = (__typeof__(*(ptr)))__gu_val; \
259 } else \
260 __gu_err = -EFAULT; \
261 __gu_err; \
262})
263#endif
264
265#define __get_user_asm(x, addr, err, insn) \
266({ \
267__asm__ __volatile__( \
268 "1:\n\t" \
269 "mov." insn " %2, %1\n\t" \
270 "mov #0, %0\n" \
271 "2:\n" \
272 ".section .fixup,\"ax\"\n" \
273 "3:\n\t" \
274 "mov #0, %1\n\t" \
275 "mov.l 4f, %0\n\t" \
276 "jmp @%0\n\t" \
277 " mov %3, %0\n" \
278 "4: .long 2b\n\t" \
279 ".previous\n" \
280 ".section __ex_table,\"a\"\n\t" \
281 ".long 1b, 3b\n\t" \
282 ".previous" \
283 :"=&r" (err), "=&r" (x) \
284 :"m" (__m(addr)), "i" (-EFAULT)); })
285
286extern void __get_user_unknown(void);
287
288#define __put_user_size(x,ptr,size,retval) \
289do { \
290 retval = 0; \
291 __chk_user_ptr(ptr); \
292 switch (size) { \
293 case 1: \
294 __put_user_asm(x, ptr, retval, "b"); \
295 break; \
296 case 2: \
297 __put_user_asm(x, ptr, retval, "w"); \
298 break; \
299 case 4: \
300 __put_user_asm(x, ptr, retval, "l"); \
301 break; \
302 case 8: \
303 __put_user_u64(x, ptr, retval); \
304 break; \
305 default: \
306 __put_user_unknown(); \
307 } \
308} while (0)
309
310#define __put_user_nocheck(x,ptr,size) \
311({ \
312 long __pu_err; \
313 __put_user_size((x),(ptr),(size),__pu_err); \
314 __pu_err; \
315})
316
317#define __put_user_check(x,ptr,size) \
318({ \
319 long __pu_err = -EFAULT; \
320 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
321 \
322 if (__access_ok((unsigned long)__pu_addr,size)) \
323 __put_user_size((x),__pu_addr,(size),__pu_err); \
324 __pu_err; \
325})
326
327#define __put_user_asm(x, addr, err, insn) \
328({ \
329__asm__ __volatile__( \
330 "1:\n\t" \
331 "mov." insn " %1, %2\n\t" \
332 "mov #0, %0\n" \
333 "2:\n" \
334 ".section .fixup,\"ax\"\n" \
335 "3:\n\t" \
336 "nop\n\t" \
337 "mov.l 4f, %0\n\t" \
338 "jmp @%0\n\t" \
339 "mov %3, %0\n" \
340 "4: .long 2b\n\t" \
341 ".previous\n" \
342 ".section __ex_table,\"a\"\n\t" \
343 ".long 1b, 3b\n\t" \
344 ".previous" \
345 :"=&r" (err) \
346 :"r" (x), "m" (__m(addr)), "i" (-EFAULT) \
347 :"memory"); })
348
349#if defined(__LITTLE_ENDIAN__)
350#define __put_user_u64(val,addr,retval) \
351({ \
352__asm__ __volatile__( \
353 "1:\n\t" \
354 "mov.l %R1,%2\n\t" \
355 "mov.l %S1,%T2\n\t" \
356 "mov #0,%0\n" \
357 "2:\n" \
358 ".section .fixup,\"ax\"\n" \
359 "3:\n\t" \
360 "nop\n\t" \
361 "mov.l 4f,%0\n\t" \
362 "jmp @%0\n\t" \
363 " mov %3,%0\n" \
364 "4: .long 2b\n\t" \
365 ".previous\n" \
366 ".section __ex_table,\"a\"\n\t" \
367 ".long 1b, 3b\n\t" \
368 ".previous" \
369 : "=r" (retval) \
370 : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
371 : "memory"); })
372#else 3#else
373#define __put_user_u64(val,addr,retval) \ 4# include "uaccess_64.h"
374({ \
375__asm__ __volatile__( \
376 "1:\n\t" \
377 "mov.l %S1,%2\n\t" \
378 "mov.l %R1,%T2\n\t" \
379 "mov #0,%0\n" \
380 "2:\n" \
381 ".section .fixup,\"ax\"\n" \
382 "3:\n\t" \
383 "nop\n\t" \
384 "mov.l 4f,%0\n\t" \
385 "jmp @%0\n\t" \
386 " mov %3,%0\n" \
387 "4: .long 2b\n\t" \
388 ".previous\n" \
389 ".section __ex_table,\"a\"\n\t" \
390 ".long 1b, 3b\n\t" \
391 ".previous" \
392 : "=r" (retval) \
393 : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
394 : "memory"); })
395#endif 5#endif
396
397extern void __put_user_unknown(void);
398
399/* Generic arbitrary sized copy. */
400/* Return the number of bytes NOT copied */
401__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
402
403#define copy_to_user(to,from,n) ({ \
404void *__copy_to = (void *) (to); \
405__kernel_size_t __copy_size = (__kernel_size_t) (n); \
406__kernel_size_t __copy_res; \
407if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
408__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
409} else __copy_res = __copy_size; \
410__copy_res; })
411
412#define copy_from_user(to,from,n) ({ \
413void *__copy_to = (void *) (to); \
414void *__copy_from = (void *) (from); \
415__kernel_size_t __copy_size = (__kernel_size_t) (n); \
416__kernel_size_t __copy_res; \
417if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
418__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
419} else __copy_res = __copy_size; \
420__copy_res; })
421
422static __always_inline unsigned long
423__copy_from_user(void *to, const void __user *from, unsigned long n)
424{
425 return __copy_user(to, (__force void *)from, n);
426}
427
428static __always_inline unsigned long __must_check
429__copy_to_user(void __user *to, const void *from, unsigned long n)
430{
431 return __copy_user((__force void *)to, from, n);
432}
433
434#define __copy_to_user_inatomic __copy_to_user
435#define __copy_from_user_inatomic __copy_from_user
436
437/*
438 * Clear the area and return remaining number of bytes
439 * (on failure. Usually it's 0.)
440 */
441extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
442
443#define clear_user(addr,n) ({ \
444void * __cl_addr = (addr); \
445unsigned long __cl_size = (n); \
446if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
447__cl_size = __clear_user(__cl_addr, __cl_size); \
448__cl_size; })
449
450static __inline__ int
451__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
452{
453 __kernel_size_t res;
454 unsigned long __dummy, _d, _s;
455
456 __asm__ __volatile__(
457 "9:\n"
458 "mov.b @%2+, %1\n\t"
459 "cmp/eq #0, %1\n\t"
460 "bt/s 2f\n"
461 "1:\n"
462 "mov.b %1, @%3\n\t"
463 "dt %7\n\t"
464 "bf/s 9b\n\t"
465 " add #1, %3\n\t"
466 "2:\n\t"
467 "sub %7, %0\n"
468 "3:\n"
469 ".section .fixup,\"ax\"\n"
470 "4:\n\t"
471 "mov.l 5f, %1\n\t"
472 "jmp @%1\n\t"
473 " mov %8, %0\n\t"
474 ".balign 4\n"
475 "5: .long 3b\n"
476 ".previous\n"
477 ".section __ex_table,\"a\"\n"
478 " .balign 4\n"
479 " .long 9b,4b\n"
480 ".previous"
481 : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
482 : "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
483 "i" (-EFAULT)
484 : "memory", "t");
485
486 return res;
487}
488
489#define strncpy_from_user(dest,src,count) ({ \
490unsigned long __sfu_src = (unsigned long) (src); \
491int __sfu_count = (int) (count); \
492long __sfu_res = -EFAULT; \
493if(__access_ok(__sfu_src, __sfu_count)) { \
494__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
495} __sfu_res; })
496
497/*
498 * Return the size of a string (including the ending 0!)
499 */
500static __inline__ long __strnlen_user(const char __user *__s, long __n)
501{
502 unsigned long res;
503 unsigned long __dummy;
504
505 __asm__ __volatile__(
506 "9:\n"
507 "cmp/eq %4, %0\n\t"
508 "bt 2f\n"
509 "1:\t"
510 "mov.b @(%0,%3), %1\n\t"
511 "tst %1, %1\n\t"
512 "bf/s 9b\n\t"
513 " add #1, %0\n"
514 "2:\n"
515 ".section .fixup,\"ax\"\n"
516 "3:\n\t"
517 "mov.l 4f, %1\n\t"
518 "jmp @%1\n\t"
519 " mov #0, %0\n"
520 ".balign 4\n"
521 "4: .long 2b\n"
522 ".previous\n"
523 ".section __ex_table,\"a\"\n"
524 " .balign 4\n"
525 " .long 1b,3b\n"
526 ".previous"
527 : "=z" (res), "=&r" (__dummy)
528 : "0" (0), "r" (__s), "r" (__n)
529 : "t");
530 return res;
531}
532
533static __inline__ long strnlen_user(const char __user *s, long n)
534{
535 if (!__addr_ok(s))
536 return 0;
537 else
538 return __strnlen_user(s, n);
539}
540
541#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
542
543/*
544 * The exception table consists of pairs of addresses: the first is the
545 * address of an instruction that is allowed to fault, and the second is
546 * the address at which the program should continue. No registers are
547 * modified, so it is entirely up to the continuation code to figure out
548 * what to do.
549 *
550 * All the routines below use bits of fixup code that are out of line
551 * with the main instruction path. This means when everything is well,
552 * we don't even have to jump over them. Further, they do not intrude
553 * on our cache or tlb entries.
554 */
555
556struct exception_table_entry
557{
558 unsigned long insn, fixup;
559};
560
561extern int fixup_exception(struct pt_regs *regs);
562
563#endif /* __ASM_SH_UACCESS_H */
diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h
new file mode 100644
index 000000000000..b6082f3c1dc4
--- /dev/null
+++ b/include/asm-sh/uaccess_32.h
@@ -0,0 +1,510 @@
1/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
2 *
3 * User space memory access functions
4 *
5 * Copyright (C) 1999, 2002 Niibe Yutaka
6 * Copyright (C) 2003 Paul Mundt
7 *
8 * Based on:
9 * MIPS implementation version 1.15 by
10 * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
11 * and i386 version.
12 */
13#ifndef __ASM_SH_UACCESS_H
14#define __ASM_SH_UACCESS_H
15
16#include <linux/errno.h>
17#include <linux/sched.h>
18
19#define VERIFY_READ 0
20#define VERIFY_WRITE 1
21
22/*
23 * The fs value determines whether argument validity checking should be
24 * performed or not. If get_fs() == USER_DS, checking is performed, with
25 * get_fs() == KERNEL_DS, checking is bypassed.
26 *
27 * For historical reasons (Data Segment Register?), these macros are misnamed.
28 */
29
30#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
31
32#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL)
33#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
34
35#define segment_eq(a,b) ((a).seg == (b).seg)
36
37#define get_ds() (KERNEL_DS)
38
39#if !defined(CONFIG_MMU)
40/* NOMMU is always true */
41#define __addr_ok(addr) (1)
42
43static inline mm_segment_t get_fs(void)
44{
45 return USER_DS;
46}
47
48static inline void set_fs(mm_segment_t s)
49{
50}
51
52/*
53 * __access_ok: Check if address with size is OK or not.
54 *
55 * If we don't have an MMU (or if it's disabled) the only thing we really have
56 * to look out for is if the address resides somewhere outside of what
57 * available RAM we have.
58 *
59 * TODO: This check could probably also stand to be restricted somewhat more..
60 * though it still does the Right Thing(tm) for the time being.
61 */
62static inline int __access_ok(unsigned long addr, unsigned long size)
63{
64 return ((addr >= memory_start) && ((addr + size) < memory_end));
65}
66#else /* CONFIG_MMU */
67#define __addr_ok(addr) \
68 ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
69
70#define get_fs() (current_thread_info()->addr_limit)
71#define set_fs(x) (current_thread_info()->addr_limit = (x))
72
73/*
74 * __access_ok: Check if address with size is OK or not.
75 *
76 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
77 *
78 * sum := addr + size; carry? --> flag = true;
79 * if (sum >= addr_limit) flag = true;
80 */
81static inline int __access_ok(unsigned long addr, unsigned long size)
82{
83 unsigned long flag, sum;
84
85 __asm__("clrt\n\t"
86 "addc %3, %1\n\t"
87 "movt %0\n\t"
88 "cmp/hi %4, %1\n\t"
89 "rotcl %0"
90 :"=&r" (flag), "=r" (sum)
91 :"1" (addr), "r" (size),
92 "r" (current_thread_info()->addr_limit.seg)
93 :"t");
94 return flag == 0;
95}
96#endif /* CONFIG_MMU */
97
98#define access_ok(type, addr, size) \
99 (__chk_user_ptr(addr), \
100 __access_ok((unsigned long __force)(addr), (size)))
101
102/*
103 * Uh, these should become the main single-value transfer routines ...
104 * They automatically use the right size if we just have the right
105 * pointer type ...
106 *
107 * As SuperH uses the same address space for kernel and user data, we
108 * can just do these as direct assignments.
109 *
110 * Careful to not
111 * (a) re-use the arguments for side effects (sizeof is ok)
112 * (b) require any knowledge of processes at this stage
113 */
114#define put_user(x,ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
115#define get_user(x,ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
116
117/*
118 * The "__xxx" versions do not do address space checking, useful when
119 * doing multiple accesses to the same area (the user has to do the
120 * checks by hand with "access_ok()")
121 */
122#define __put_user(x,ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
123#define __get_user(x,ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
124
125struct __large_struct { unsigned long buf[100]; };
126#define __m(x) (*(struct __large_struct __user *)(x))
127
128#define __get_user_size(x,ptr,size,retval) \
129do { \
130 retval = 0; \
131 switch (size) { \
132 case 1: \
133 __get_user_asm(x, ptr, retval, "b"); \
134 break; \
135 case 2: \
136 __get_user_asm(x, ptr, retval, "w"); \
137 break; \
138 case 4: \
139 __get_user_asm(x, ptr, retval, "l"); \
140 break; \
141 default: \
142 __get_user_unknown(); \
143 break; \
144 } \
145} while (0)
146
147#define __get_user_nocheck(x,ptr,size) \
148({ \
149 long __gu_err; \
150 unsigned long __gu_val; \
151 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
152 __chk_user_ptr(ptr); \
153 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
154 (x) = (__typeof__(*(ptr)))__gu_val; \
155 __gu_err; \
156})
157
158#define __get_user_check(x,ptr,size) \
159({ \
160 long __gu_err = -EFAULT; \
161 unsigned long __gu_val = 0; \
162 const __typeof__(*(ptr)) *__gu_addr = (ptr); \
163 if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \
164 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
165 (x) = (__typeof__(*(ptr)))__gu_val; \
166 __gu_err; \
167})
168
169#define __get_user_asm(x, addr, err, insn) \
170({ \
171__asm__ __volatile__( \
172 "1:\n\t" \
173 "mov." insn " %2, %1\n\t" \
174 "2:\n" \
175 ".section .fixup,\"ax\"\n" \
176 "3:\n\t" \
177 "mov #0, %1\n\t" \
178 "mov.l 4f, %0\n\t" \
179 "jmp @%0\n\t" \
180 " mov %3, %0\n\t" \
181 ".balign 4\n" \
182 "4: .long 2b\n\t" \
183 ".previous\n" \
184 ".section __ex_table,\"a\"\n\t" \
185 ".long 1b, 3b\n\t" \
186 ".previous" \
187 :"=&r" (err), "=&r" (x) \
188 :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
189
190extern void __get_user_unknown(void);
191
192#define __put_user_size(x,ptr,size,retval) \
193do { \
194 retval = 0; \
195 switch (size) { \
196 case 1: \
197 __put_user_asm(x, ptr, retval, "b"); \
198 break; \
199 case 2: \
200 __put_user_asm(x, ptr, retval, "w"); \
201 break; \
202 case 4: \
203 __put_user_asm(x, ptr, retval, "l"); \
204 break; \
205 case 8: \
206 __put_user_u64(x, ptr, retval); \
207 break; \
208 default: \
209 __put_user_unknown(); \
210 } \
211} while (0)
212
213#define __put_user_nocheck(x,ptr,size) \
214({ \
215 long __pu_err; \
216 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
217 __chk_user_ptr(ptr); \
218 __put_user_size((x), __pu_addr, (size), __pu_err); \
219 __pu_err; \
220})
221
222#define __put_user_check(x,ptr,size) \
223({ \
224 long __pu_err = -EFAULT; \
225 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
226 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
227 __put_user_size((x), __pu_addr, (size), \
228 __pu_err); \
229 __pu_err; \
230})
231
232#define __put_user_asm(x, addr, err, insn) \
233({ \
234__asm__ __volatile__( \
235 "1:\n\t" \
236 "mov." insn " %1, %2\n\t" \
237 "2:\n" \
238 ".section .fixup,\"ax\"\n" \
239 "3:\n\t" \
240 "mov.l 4f, %0\n\t" \
241 "jmp @%0\n\t" \
242 " mov %3, %0\n\t" \
243 ".balign 4\n" \
244 "4: .long 2b\n\t" \
245 ".previous\n" \
246 ".section __ex_table,\"a\"\n\t" \
247 ".long 1b, 3b\n\t" \
248 ".previous" \
249 :"=&r" (err) \
250 :"r" (x), "m" (__m(addr)), "i" (-EFAULT), "0" (err) \
251 :"memory"); })
252
253#if defined(CONFIG_CPU_LITTLE_ENDIAN)
254#define __put_user_u64(val,addr,retval) \
255({ \
256__asm__ __volatile__( \
257 "1:\n\t" \
258 "mov.l %R1,%2\n\t" \
259 "mov.l %S1,%T2\n\t" \
260 "2:\n" \
261 ".section .fixup,\"ax\"\n" \
262 "3:\n\t" \
263 "mov.l 4f,%0\n\t" \
264 "jmp @%0\n\t" \
265 " mov %3,%0\n\t" \
266 ".balign 4\n" \
267 "4: .long 2b\n\t" \
268 ".previous\n" \
269 ".section __ex_table,\"a\"\n\t" \
270 ".long 1b, 3b\n\t" \
271 ".previous" \
272 : "=r" (retval) \
273 : "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \
274 : "memory"); })
275#else
276#define __put_user_u64(val,addr,retval) \
277({ \
278__asm__ __volatile__( \
279 "1:\n\t" \
280 "mov.l %S1,%2\n\t" \
281 "mov.l %R1,%T2\n\t" \
282 "2:\n" \
283 ".section .fixup,\"ax\"\n" \
284 "3:\n\t" \
285 "mov.l 4f,%0\n\t" \
286 "jmp @%0\n\t" \
287 " mov %3,%0\n\t" \
288 ".balign 4\n" \
289 "4: .long 2b\n\t" \
290 ".previous\n" \
291 ".section __ex_table,\"a\"\n\t" \
292 ".long 1b, 3b\n\t" \
293 ".previous" \
294 : "=r" (retval) \
295 : "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \
296 : "memory"); })
297#endif
298
299extern void __put_user_unknown(void);
300
301/* Generic arbitrary sized copy. */
302/* Return the number of bytes NOT copied */
303__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
304
305#define copy_to_user(to,from,n) ({ \
306void *__copy_to = (void *) (to); \
307__kernel_size_t __copy_size = (__kernel_size_t) (n); \
308__kernel_size_t __copy_res; \
309if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
310__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
311} else __copy_res = __copy_size; \
312__copy_res; })
313
314#define copy_from_user(to,from,n) ({ \
315void *__copy_to = (void *) (to); \
316void *__copy_from = (void *) (from); \
317__kernel_size_t __copy_size = (__kernel_size_t) (n); \
318__kernel_size_t __copy_res; \
319if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
320__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
321} else __copy_res = __copy_size; \
322__copy_res; })
323
324static __always_inline unsigned long
325__copy_from_user(void *to, const void __user *from, unsigned long n)
326{
327 return __copy_user(to, (__force void *)from, n);
328}
329
330static __always_inline unsigned long __must_check
331__copy_to_user(void __user *to, const void *from, unsigned long n)
332{
333 return __copy_user((__force void *)to, from, n);
334}
335
336#define __copy_to_user_inatomic __copy_to_user
337#define __copy_from_user_inatomic __copy_from_user
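/*
 * Illustrative sketch only (the buffer and function names are made up):
 * copy_{to,from}_user() return the number of bytes NOT copied, so any
 * non-zero result is normally reported as -EFAULT.
 */
static char example_dev_buf[64];

static long example_read(char __user *ubuf, size_t len)
{
	if (len > sizeof(example_dev_buf))
		len = sizeof(example_dev_buf);
	if (copy_to_user(ubuf, example_dev_buf, len))
		return -EFAULT;
	return len;
}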
338
339/*
340 * Clear the area and return the number of bytes that could NOT be
341 * cleared (0 on success).
342 */
343extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
344
345#define clear_user(addr,n) ({ \
346void * __cl_addr = (addr); \
347unsigned long __cl_size = (n); \
348if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
349__cl_size = __clear_user(__cl_addr, __cl_size); \
350__cl_size; })
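/*
 * Illustrative sketch only (made-up name): clear_user() returns the
 * number of bytes left uncleared, so a non-zero result means -EFAULT.
 */
static long example_zero_user(void __user *ubuf, unsigned long len)
{
	if (clear_user(ubuf, len))
		return -EFAULT;
	return 0;
}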
351
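/*
 * Helper behind strncpy_from_user(): copies bytes from user space until
 * a NUL terminator or __count bytes have been transferred; a faulting
 * user access is turned into -EFAULT through the fixup section.
 */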
352static __inline__ int
353__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
354{
355 __kernel_size_t res;
356 unsigned long __dummy, _d, _s, _c;
357
358 __asm__ __volatile__(
359 "9:\n"
360 "mov.b @%2+, %1\n\t"
361 "cmp/eq #0, %1\n\t"
362 "bt/s 2f\n"
363 "1:\n"
364 "mov.b %1, @%3\n\t"
365 "dt %4\n\t"
366 "bf/s 9b\n\t"
367 " add #1, %3\n\t"
368 "2:\n\t"
369 "sub %4, %0\n"
370 "3:\n"
371 ".section .fixup,\"ax\"\n"
372 "4:\n\t"
373 "mov.l 5f, %1\n\t"
374 "jmp @%1\n\t"
375 " mov %9, %0\n\t"
376 ".balign 4\n"
377 "5: .long 3b\n"
378 ".previous\n"
379 ".section __ex_table,\"a\"\n"
380 " .balign 4\n"
381 " .long 9b,4b\n"
382 ".previous"
383 : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
384 : "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
385 "i" (-EFAULT)
386 : "memory", "t");
387
388 return res;
389}
390
391/**
392 * strncpy_from_user: - Copy a NUL terminated string from userspace.
393 * @dst: Destination address, in kernel space. This buffer must be at
394 * least @count bytes long.
395 * @src: Source address, in user space.
396 * @count: Maximum number of bytes to copy, including the trailing NUL.
397 *
398 * Copies a NUL-terminated string from userspace to kernel space.
399 *
400 * On success, returns the length of the string (not including the trailing
401 * NUL).
402 *
403 * If access to userspace fails, returns -EFAULT (some data may have been
404 * copied).
405 *
406 * If @count is smaller than the length of the string, copies @count bytes
407 * and returns @count.
408 */
409#define strncpy_from_user(dest,src,count) ({ \
410unsigned long __sfu_src = (unsigned long) (src); \
411int __sfu_count = (int) (count); \
412long __sfu_res = -EFAULT; \
413if(__access_ok(__sfu_src, __sfu_count)) { \
414__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
415} __sfu_res; })
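/*
 * Illustrative sketch only (buffer size and names are made up), showing
 * the return values documented above: a negative value is -EFAULT, and
 * a value equal to the buffer size means the string was truncated and
 * is not NUL-terminated.
 */
static long example_get_name(const char __user *uname)
{
	char name[32];
	long len;

	len = strncpy_from_user(name, uname, sizeof(name));
	if (len < 0)
		return len;			/* -EFAULT */
	if (len == sizeof(name))
		return -ENAMETOOLONG;		/* truncated, no NUL */
	return 0;				/* name[] is NUL-terminated */
}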
416
417/*
418 * Return the size of a string (including the ending 0 even when we have
419 * exceeded the maximum string length).
420 */
421static __inline__ long __strnlen_user(const char __user *__s, long __n)
422{
423 unsigned long res;
424 unsigned long __dummy;
425
426 __asm__ __volatile__(
427 "1:\t"
428 "mov.b @(%0,%3), %1\n\t"
429 "cmp/eq %4, %0\n\t"
430 "bt/s 2f\n\t"
431 " add #1, %0\n\t"
432 "tst %1, %1\n\t"
433 "bf 1b\n\t"
434 "2:\n"
435 ".section .fixup,\"ax\"\n"
436 "3:\n\t"
437 "mov.l 4f, %1\n\t"
438 "jmp @%1\n\t"
439 " mov #0, %0\n"
440 ".balign 4\n"
441 "4: .long 2b\n"
442 ".previous\n"
443 ".section __ex_table,\"a\"\n"
444 " .balign 4\n"
445 " .long 1b,3b\n"
446 ".previous"
447 : "=z" (res), "=&r" (__dummy)
448 : "0" (0), "r" (__s), "r" (__n)
449 : "t");
450 return res;
451}
452
453/**
454 * strnlen_user: - Get the size of a string in user space.
455 * @s: The string to measure.
456 * @n: The maximum valid length
457 *
458 * Context: User context only. This function may sleep.
459 *
460 * Get the size of a NUL-terminated string in user space.
461 *
462 * Returns the size of the string INCLUDING the terminating NUL.
463 * On exception, returns 0.
464 * If the string is too long, returns a value greater than @n.
465 */
466static __inline__ long strnlen_user(const char __user *s, long n)
467{
468 if (!__addr_ok(s))
469 return 0;
470 else
471 return __strnlen_user(s, n);
472}
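/*
 * Illustrative sketch only (made-up name), exercising the documented
 * return values; strlen_user() further below is simply strnlen_user()
 * with the largest positive long as the limit.
 */
static long example_measure_string(const char __user *ustr, long limit)
{
	long len = strnlen_user(ustr, limit);	/* includes trailing NUL */

	if (len == 0)
		return -EFAULT;			/* faulted while scanning */
	if (len > limit)
		return -ENAMETOOLONG;		/* longer than permitted */
	return len;
}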
473
474/**
475 * strlen_user: - Get the size of a string in user space.
476 * @str: The string to measure.
477 *
478 * Context: User context only. This function may sleep.
479 *
480 * Get the size of a NUL-terminated string in user space.
481 *
482 * Returns the size of the string INCLUDING the terminating NUL.
483 * On exception, returns 0.
484 *
485 * If there is a limit on the length of a valid string, you may wish to
486 * consider using strnlen_user() instead.
487 */
488#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
489
490/*
491 * The exception table consists of pairs of addresses: the first is the
492 * address of an instruction that is allowed to fault, and the second is
493 * the address at which the program should continue. No registers are
494 * modified, so it is entirely up to the continuation code to figure out
495 * what to do.
496 *
497 * All the routines below use bits of fixup code that are out of line
498 * with the main instruction path. This means when everything is well,
499 * we don't even have to jump over them. Further, they do not intrude
500 * on our cache or tlb entries.
501 */
502
503struct exception_table_entry
504{
505 unsigned long insn, fixup;
506};
507
508extern int fixup_exception(struct pt_regs *regs);
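/*
 * Rough sketch of how the fault path consumes this table (the
 * authoritative implementation lives in the architecture's fault
 * handling code, not in this header):
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *fixup;
 *
 *		fixup = search_exception_tables(regs->pc);
 *		if (fixup) {
 *			regs->pc = fixup->fixup;
 *			return 1;
 *		}
 *		return 0;
 *	}
 */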
509
510#endif /* __ASM_SH_UACCESS_H */
diff --git a/include/asm-sh/uaccess_64.h b/include/asm-sh/uaccess_64.h
new file mode 100644
index 000000000000..d54ec082d25a
--- /dev/null
+++ b/include/asm-sh/uaccess_64.h
@@ -0,0 +1,302 @@
1#ifndef __ASM_SH_UACCESS_64_H
2#define __ASM_SH_UACCESS_64_H
3
4/*
5 * include/asm-sh/uaccess_64.h
6 *
7 * Copyright (C) 2000, 2001 Paolo Alberelli
8 * Copyright (C) 2003, 2004 Paul Mundt
9 *
10 * User space memory access functions
11 *
12 * Copyright (C) 1999 Niibe Yutaka
13 *
14 * Based on:
15 * MIPS implementation version 1.15 by
16 * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
17 * and i386 version.
18 *
19 * This file is subject to the terms and conditions of the GNU General Public
20 * License. See the file "COPYING" in the main directory of this archive
21 * for more details.
22 */
23#include <linux/errno.h>
24#include <linux/sched.h>
25
26#define VERIFY_READ 0
27#define VERIFY_WRITE 1
28
29/*
30 * The fs value determines whether argument validity checking should be
31 * performed or not. If get_fs() == USER_DS, checking is performed; with
32 * get_fs() == KERNEL_DS, checking is bypassed.
33 *
34 * For historical reasons (Data Segment Register?), these macros are misnamed.
35 */
36
37#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
38
39#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
40#define USER_DS MAKE_MM_SEG(0x80000000)
41
42#define get_ds() (KERNEL_DS)
43#define get_fs() (current_thread_info()->addr_limit)
44#define set_fs(x) (current_thread_info()->addr_limit=(x))
45
46#define segment_eq(a,b) ((a).seg == (b).seg)
47
48#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
49
50/*
51 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
52 *
53 * sum := addr + size; carry? --> flag = true;
54 * if (sum >= addr_limit) flag = true;
55 */
56#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)
57
58#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
59#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
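/*
 * The comment above spells out the carry case; purely for illustration
 * (this is not what the macro above does), an overflow-aware form of
 * the same check would be roughly:
 *
 *	sum = addr + size;
 *	ok  = (sum >= addr) && (sum < current_thread_info()->addr_limit.seg);
 */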
60
61/*
62 * Uh, these should become the main single-value transfer routines ...
63 * They automatically use the right size if we just have the right
64 * pointer type ...
65 *
66 * As MIPS uses the same address space for kernel and user data, we
67 * can just do these as direct assignments.
68 *
69 * Careful to not
70 * (a) re-use the arguments for side effects (sizeof is ok)
71 * (b) require any knowledge of processes at this stage
72 */
73#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
74#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
75
76/*
77 * The "__xxx" versions do not do address space checking, useful when
78 * doing multiple accesses to the same area (the user has to do the
79 * checks by hand with "access_ok()")
80 */
81#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
82#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
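/*
 * Illustrative sketch only (made-up names): the macros size the access
 * from the pointer type, and a non-zero return means the user access
 * faulted or failed the access_ok() check.
 */
static long example_roundtrip(int __user *uarg)
{
	int val;

	if (get_user(val, uarg))
		return -EFAULT;
	val *= 2;
	if (put_user(val, uarg))
		return -EFAULT;
	return 0;
}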
83
84/*
85 * The "xxx_ret" versions return the constant specified in the third
86 * argument if something bad happens. These macros can be optimized for
87 * the case of just returning from the function in which xxx_ret is used.
88 */
89
90#define put_user_ret(x,ptr,ret) ({ \
91if (put_user(x,ptr)) return ret; })
92
93#define get_user_ret(x,ptr,ret) ({ \
94if (get_user(x,ptr)) return ret; })
95
96#define __put_user_ret(x,ptr,ret) ({ \
97if (__put_user(x,ptr)) return ret; })
98
99#define __get_user_ret(x,ptr,ret) ({ \
100if (__get_user(x,ptr)) return ret; })
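/*
 * Illustrative only: the _ret form hides the early return, so these two
 * made-up helpers behave identically.
 */
static long example_set_flag(int __user *uptr)
{
	put_user_ret(1, uptr, -EFAULT);
	return 0;
}

static long example_set_flag_open_coded(int __user *uptr)
{
	if (put_user(1, uptr))
		return -EFAULT;
	return 0;
}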
101
102struct __large_struct { unsigned long buf[100]; };
103#define __m(x) (*(struct __large_struct *)(x))
104
105#define __get_user_size(x,ptr,size,retval) \
106do { \
107 retval = 0; \
108 switch (size) { \
109 case 1: \
110 retval = __get_user_asm_b(x, ptr); \
111 break; \
112 case 2: \
113 retval = __get_user_asm_w(x, ptr); \
114 break; \
115 case 4: \
116 retval = __get_user_asm_l(x, ptr); \
117 break; \
118 case 8: \
119 retval = __get_user_asm_q(x, ptr); \
120 break; \
121 default: \
122 __get_user_unknown(); \
123 break; \
124 } \
125} while (0)
126
127#define __get_user_nocheck(x,ptr,size) \
128({ \
129 long __gu_err, __gu_val; \
130 __get_user_size((void *)&__gu_val, (long)(ptr), \
131 (size), __gu_err); \
132 (x) = (__typeof__(*(ptr)))__gu_val; \
133 __gu_err; \
134})
135
136#define __get_user_check(x,ptr,size) \
137({ \
138 long __gu_addr = (long)(ptr); \
139 long __gu_err = -EFAULT, __gu_val; \
140 if (__access_ok(__gu_addr, (size))) \
141 __get_user_size((void *)&__gu_val, __gu_addr, \
142 (size), __gu_err); \
143 (x) = (__typeof__(*(ptr))) __gu_val; \
144 __gu_err; \
145})
146
147extern long __get_user_asm_b(void *, long);
148extern long __get_user_asm_w(void *, long);
149extern long __get_user_asm_l(void *, long);
150extern long __get_user_asm_q(void *, long);
151extern void __get_user_unknown(void);
152
153#define __put_user_size(x,ptr,size,retval) \
154do { \
155 retval = 0; \
156 switch (size) { \
157 case 1: \
158 retval = __put_user_asm_b(x, ptr); \
159 break; \
160 case 2: \
161 retval = __put_user_asm_w(x, ptr); \
162 break; \
163 case 4: \
164 retval = __put_user_asm_l(x, ptr); \
165 break; \
166 case 8: \
167 retval = __put_user_asm_q(x, ptr); \
168 break; \
169 default: \
170 __put_user_unknown(); \
171 } \
172} while (0)
173
174#define __put_user_nocheck(x,ptr,size) \
175({ \
176 long __pu_err; \
177 __typeof__(*(ptr)) __pu_val = (x); \
178 __put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
179 __pu_err; \
180})
181
182#define __put_user_check(x,ptr,size) \
183({ \
184 long __pu_err = -EFAULT; \
185 long __pu_addr = (long)(ptr); \
186 __typeof__(*(ptr)) __pu_val = (x); \
187 \
188 if (__access_ok(__pu_addr, (size))) \
189 __put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err);\
190 __pu_err; \
191})
192
193extern long __put_user_asm_b(void *, long);
194extern long __put_user_asm_w(void *, long);
195extern long __put_user_asm_l(void *, long);
196extern long __put_user_asm_q(void *, long);
197extern void __put_user_unknown(void);
198
199
200/* Generic arbitrary sized copy. */
201/* Return the number of bytes NOT copied */
202/* XXX: should be split into a 4-byte-at-a-time copy plus a byte-wise tail. */
203extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);
204
205#define copy_to_user(to,from,n) ({ \
206void *__copy_to = (void *) (to); \
207__kernel_size_t __copy_size = (__kernel_size_t) (n); \
208__kernel_size_t __copy_res; \
209if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
210__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
211} else __copy_res = __copy_size; \
212__copy_res; })
213
214#define copy_to_user_ret(to,from,n,retval) ({ \
215if (copy_to_user(to,from,n)) \
216 return retval; \
217})
218
219#define __copy_to_user(to,from,n) \
220 __copy_user((void *)(to), \
221 (void *)(from), n)
222
223#define __copy_to_user_ret(to,from,n,retval) ({ \
224if (__copy_to_user(to,from,n)) \
225 return retval; \
226})
227
228#define copy_from_user(to,from,n) ({ \
229void *__copy_to = (void *) (to); \
230void *__copy_from = (void *) (from); \
231__kernel_size_t __copy_size = (__kernel_size_t) (n); \
232__kernel_size_t __copy_res; \
233if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
234__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
235} else __copy_res = __copy_size; \
236__copy_res; })
237
238#define copy_from_user_ret(to,from,n,retval) ({ \
239if (copy_from_user(to,from,n)) \
240 return retval; \
241})
242
243#define __copy_from_user(to,from,n) \
244 __copy_user((void *)(to), \
245 (void *)(from), n)
246
247#define __copy_from_user_ret(to,from,n,retval) ({ \
248if (__copy_from_user(to,from,n)) \
249 return retval; \
250})
251
252#define __copy_to_user_inatomic __copy_to_user
253#define __copy_from_user_inatomic __copy_from_user
254
255/* XXX: Not sure this works well; it should be split into a
256 4-byte-at-a-time clear plus a byte-wise tail. */
257extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
258
259#define clear_user(addr,n) ({ \
260void * __cl_addr = (addr); \
261unsigned long __cl_size = (n); \
262if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
263__cl_size = __clear_user(__cl_addr, __cl_size); \
264__cl_size; })
265
266extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);
267
268#define strncpy_from_user(dest,src,count) ({ \
269unsigned long __sfu_src = (unsigned long) (src); \
270int __sfu_count = (int) (count); \
271long __sfu_res = -EFAULT; \
272if(__access_ok(__sfu_src, __sfu_count)) { \
273__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
274} __sfu_res; })
275
276#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
277
278/*
279 * Return the size of a string (including the ending 0!)
280 */
281extern long __strnlen_user(const char *__s, long __n);
282
283static inline long strnlen_user(const char *s, long n)
284{
285 if (!__addr_ok(s))
286 return 0;
287 else
288 return __strnlen_user(s, n);
289}
290
291struct exception_table_entry
292{
293 unsigned long insn, fixup;
294};
295
296#define ARCH_HAS_SEARCH_EXTABLE
297
298/* Returns 0 if exception not found and fixup.unit otherwise. */
299extern unsigned long search_exception_table(unsigned long addr);
300extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
301
302#endif /* __ASM_SH_UACCESS_64_H */
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index b182b1cb05fd..4b21f369c28c 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -1,376 +1,5 @@
1#ifndef __ASM_SH_UNISTD_H 1#ifdef CONFIG_SUPERH32
2#define __ASM_SH_UNISTD_H 2# include "unistd_32.h"
3 3#else
4/* 4# include "unistd_64.h"
5 * Copyright (C) 1999 Niibe Yutaka
6 */
7
8/*
9 * This file contains the system call numbers.
10 */
11
12#define __NR_restart_syscall 0
13#define __NR_exit 1
14#define __NR_fork 2
15#define __NR_read 3
16#define __NR_write 4
17#define __NR_open 5
18#define __NR_close 6
19#define __NR_waitpid 7
20#define __NR_creat 8
21#define __NR_link 9
22#define __NR_unlink 10
23#define __NR_execve 11
24#define __NR_chdir 12
25#define __NR_time 13
26#define __NR_mknod 14
27#define __NR_chmod 15
28#define __NR_lchown 16
29#define __NR_break 17
30#define __NR_oldstat 18
31#define __NR_lseek 19
32#define __NR_getpid 20
33#define __NR_mount 21
34#define __NR_umount 22
35#define __NR_setuid 23
36#define __NR_getuid 24
37#define __NR_stime 25
38#define __NR_ptrace 26
39#define __NR_alarm 27
40#define __NR_oldfstat 28
41#define __NR_pause 29
42#define __NR_utime 30
43#define __NR_stty 31
44#define __NR_gtty 32
45#define __NR_access 33
46#define __NR_nice 34
47#define __NR_ftime 35
48#define __NR_sync 36
49#define __NR_kill 37
50#define __NR_rename 38
51#define __NR_mkdir 39
52#define __NR_rmdir 40
53#define __NR_dup 41
54#define __NR_pipe 42
55#define __NR_times 43
56#define __NR_prof 44
57#define __NR_brk 45
58#define __NR_setgid 46
59#define __NR_getgid 47
60#define __NR_signal 48
61#define __NR_geteuid 49
62#define __NR_getegid 50
63#define __NR_acct 51
64#define __NR_umount2 52
65#define __NR_lock 53
66#define __NR_ioctl 54
67#define __NR_fcntl 55
68#define __NR_mpx 56
69#define __NR_setpgid 57
70#define __NR_ulimit 58
71#define __NR_oldolduname 59
72#define __NR_umask 60
73#define __NR_chroot 61
74#define __NR_ustat 62
75#define __NR_dup2 63
76#define __NR_getppid 64
77#define __NR_getpgrp 65
78#define __NR_setsid 66
79#define __NR_sigaction 67
80#define __NR_sgetmask 68
81#define __NR_ssetmask 69
82#define __NR_setreuid 70
83#define __NR_setregid 71
84#define __NR_sigsuspend 72
85#define __NR_sigpending 73
86#define __NR_sethostname 74
87#define __NR_setrlimit 75
88#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
89#define __NR_getrusage 77
90#define __NR_gettimeofday 78
91#define __NR_settimeofday 79
92#define __NR_getgroups 80
93#define __NR_setgroups 81
94#define __NR_select 82
95#define __NR_symlink 83
96#define __NR_oldlstat 84
97#define __NR_readlink 85
98#define __NR_uselib 86
99#define __NR_swapon 87
100#define __NR_reboot 88
101#define __NR_readdir 89
102#define __NR_mmap 90
103#define __NR_munmap 91
104#define __NR_truncate 92
105#define __NR_ftruncate 93
106#define __NR_fchmod 94
107#define __NR_fchown 95
108#define __NR_getpriority 96
109#define __NR_setpriority 97
110#define __NR_profil 98
111#define __NR_statfs 99
112#define __NR_fstatfs 100
113#define __NR_ioperm 101
114#define __NR_socketcall 102
115#define __NR_syslog 103
116#define __NR_setitimer 104
117#define __NR_getitimer 105
118#define __NR_stat 106
119#define __NR_lstat 107
120#define __NR_fstat 108
121#define __NR_olduname 109
122#define __NR_iopl 110
123#define __NR_vhangup 111
124#define __NR_idle 112
125#define __NR_vm86old 113
126#define __NR_wait4 114
127#define __NR_swapoff 115
128#define __NR_sysinfo 116
129#define __NR_ipc 117
130#define __NR_fsync 118
131#define __NR_sigreturn 119
132#define __NR_clone 120
133#define __NR_setdomainname 121
134#define __NR_uname 122
135#define __NR_modify_ldt 123
136#define __NR_adjtimex 124
137#define __NR_mprotect 125
138#define __NR_sigprocmask 126
139#define __NR_create_module 127
140#define __NR_init_module 128
141#define __NR_delete_module 129
142#define __NR_get_kernel_syms 130
143#define __NR_quotactl 131
144#define __NR_getpgid 132
145#define __NR_fchdir 133
146#define __NR_bdflush 134
147#define __NR_sysfs 135
148#define __NR_personality 136
149#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
150#define __NR_setfsuid 138
151#define __NR_setfsgid 139
152#define __NR__llseek 140
153#define __NR_getdents 141
154#define __NR__newselect 142
155#define __NR_flock 143
156#define __NR_msync 144
157#define __NR_readv 145
158#define __NR_writev 146
159#define __NR_getsid 147
160#define __NR_fdatasync 148
161#define __NR__sysctl 149
162#define __NR_mlock 150
163#define __NR_munlock 151
164#define __NR_mlockall 152
165#define __NR_munlockall 153
166#define __NR_sched_setparam 154
167#define __NR_sched_getparam 155
168#define __NR_sched_setscheduler 156
169#define __NR_sched_getscheduler 157
170#define __NR_sched_yield 158
171#define __NR_sched_get_priority_max 159
172#define __NR_sched_get_priority_min 160
173#define __NR_sched_rr_get_interval 161
174#define __NR_nanosleep 162
175#define __NR_mremap 163
176#define __NR_setresuid 164
177#define __NR_getresuid 165
178#define __NR_vm86 166
179#define __NR_query_module 167
180#define __NR_poll 168
181#define __NR_nfsservctl 169
182#define __NR_setresgid 170
183#define __NR_getresgid 171
184#define __NR_prctl 172
185#define __NR_rt_sigreturn 173
186#define __NR_rt_sigaction 174
187#define __NR_rt_sigprocmask 175
188#define __NR_rt_sigpending 176
189#define __NR_rt_sigtimedwait 177
190#define __NR_rt_sigqueueinfo 178
191#define __NR_rt_sigsuspend 179
192#define __NR_pread64 180
193#define __NR_pwrite64 181
194#define __NR_chown 182
195#define __NR_getcwd 183
196#define __NR_capget 184
197#define __NR_capset 185
198#define __NR_sigaltstack 186
199#define __NR_sendfile 187
200#define __NR_streams1 188 /* some people actually want it */
201#define __NR_streams2 189 /* some people actually want it */
202#define __NR_vfork 190
203#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
204#define __NR_mmap2 192
205#define __NR_truncate64 193
206#define __NR_ftruncate64 194
207#define __NR_stat64 195
208#define __NR_lstat64 196
209#define __NR_fstat64 197
210#define __NR_lchown32 198
211#define __NR_getuid32 199
212#define __NR_getgid32 200
213#define __NR_geteuid32 201
214#define __NR_getegid32 202
215#define __NR_setreuid32 203
216#define __NR_setregid32 204
217#define __NR_getgroups32 205
218#define __NR_setgroups32 206
219#define __NR_fchown32 207
220#define __NR_setresuid32 208
221#define __NR_getresuid32 209
222#define __NR_setresgid32 210
223#define __NR_getresgid32 211
224#define __NR_chown32 212
225#define __NR_setuid32 213
226#define __NR_setgid32 214
227#define __NR_setfsuid32 215
228#define __NR_setfsgid32 216
229#define __NR_pivot_root 217
230#define __NR_mincore 218
231#define __NR_madvise 219
232#define __NR_getdents64 220
233#define __NR_fcntl64 221
234/* 223 is unused */
235#define __NR_gettid 224
236#define __NR_readahead 225
237#define __NR_setxattr 226
238#define __NR_lsetxattr 227
239#define __NR_fsetxattr 228
240#define __NR_getxattr 229
241#define __NR_lgetxattr 230
242#define __NR_fgetxattr 231
243#define __NR_listxattr 232
244#define __NR_llistxattr 233
245#define __NR_flistxattr 234
246#define __NR_removexattr 235
247#define __NR_lremovexattr 236
248#define __NR_fremovexattr 237
249#define __NR_tkill 238
250#define __NR_sendfile64 239
251#define __NR_futex 240
252#define __NR_sched_setaffinity 241
253#define __NR_sched_getaffinity 242
254#define __NR_set_thread_area 243
255#define __NR_get_thread_area 244
256#define __NR_io_setup 245
257#define __NR_io_destroy 246
258#define __NR_io_getevents 247
259#define __NR_io_submit 248
260#define __NR_io_cancel 249
261#define __NR_fadvise64 250
262
263#define __NR_exit_group 252
264#define __NR_lookup_dcookie 253
265#define __NR_epoll_create 254
266#define __NR_epoll_ctl 255
267#define __NR_epoll_wait 256
268#define __NR_remap_file_pages 257
269#define __NR_set_tid_address 258
270#define __NR_timer_create 259
271#define __NR_timer_settime (__NR_timer_create+1)
272#define __NR_timer_gettime (__NR_timer_create+2)
273#define __NR_timer_getoverrun (__NR_timer_create+3)
274#define __NR_timer_delete (__NR_timer_create+4)
275#define __NR_clock_settime (__NR_timer_create+5)
276#define __NR_clock_gettime (__NR_timer_create+6)
277#define __NR_clock_getres (__NR_timer_create+7)
278#define __NR_clock_nanosleep (__NR_timer_create+8)
279#define __NR_statfs64 268
280#define __NR_fstatfs64 269
281#define __NR_tgkill 270
282#define __NR_utimes 271
283#define __NR_fadvise64_64 272
284#define __NR_vserver 273
285#define __NR_mbind 274
286#define __NR_get_mempolicy 275
287#define __NR_set_mempolicy 276
288#define __NR_mq_open 277
289#define __NR_mq_unlink (__NR_mq_open+1)
290#define __NR_mq_timedsend (__NR_mq_open+2)
291#define __NR_mq_timedreceive (__NR_mq_open+3)
292#define __NR_mq_notify (__NR_mq_open+4)
293#define __NR_mq_getsetattr (__NR_mq_open+5)
294#define __NR_kexec_load 283
295#define __NR_waitid 284
296#define __NR_add_key 285
297#define __NR_request_key 286
298#define __NR_keyctl 287
299#define __NR_ioprio_set 288
300#define __NR_ioprio_get 289
301#define __NR_inotify_init 290
302#define __NR_inotify_add_watch 291
303#define __NR_inotify_rm_watch 292
304/* 293 is unused */
305#define __NR_migrate_pages 294
306#define __NR_openat 295
307#define __NR_mkdirat 296
308#define __NR_mknodat 297
309#define __NR_fchownat 298
310#define __NR_futimesat 299
311#define __NR_fstatat64 300
312#define __NR_unlinkat 301
313#define __NR_renameat 302
314#define __NR_linkat 303
315#define __NR_symlinkat 304
316#define __NR_readlinkat 305
317#define __NR_fchmodat 306
318#define __NR_faccessat 307
319#define __NR_pselect6 308
320#define __NR_ppoll 309
321#define __NR_unshare 310
322#define __NR_set_robust_list 311
323#define __NR_get_robust_list 312
324#define __NR_splice 313
325#define __NR_sync_file_range 314
326#define __NR_tee 315
327#define __NR_vmsplice 316
328#define __NR_move_pages 317
329#define __NR_getcpu 318
330#define __NR_epoll_pwait 319
331#define __NR_utimensat 320
332#define __NR_signalfd 321
333#define __NR_timerfd 322
334#define __NR_eventfd 323
335#define __NR_fallocate 324
336
337#define NR_syscalls 325
338
339#ifdef __KERNEL__
340
341#define __ARCH_WANT_IPC_PARSE_VERSION
342#define __ARCH_WANT_OLD_READDIR
343#define __ARCH_WANT_OLD_STAT
344#define __ARCH_WANT_STAT64
345#define __ARCH_WANT_SYS_ALARM
346#define __ARCH_WANT_SYS_GETHOSTNAME
347#define __ARCH_WANT_SYS_PAUSE
348#define __ARCH_WANT_SYS_SGETMASK
349#define __ARCH_WANT_SYS_SIGNAL
350#define __ARCH_WANT_SYS_TIME
351#define __ARCH_WANT_SYS_UTIME
352#define __ARCH_WANT_SYS_WAITPID
353#define __ARCH_WANT_SYS_SOCKETCALL
354#define __ARCH_WANT_SYS_FADVISE64
355#define __ARCH_WANT_SYS_GETPGRP
356#define __ARCH_WANT_SYS_LLSEEK
357#define __ARCH_WANT_SYS_NICE
358#define __ARCH_WANT_SYS_OLD_GETRLIMIT
359#define __ARCH_WANT_SYS_OLDUMOUNT
360#define __ARCH_WANT_SYS_SIGPENDING
361#define __ARCH_WANT_SYS_SIGPROCMASK
362#define __ARCH_WANT_SYS_RT_SIGACTION
363#define __ARCH_WANT_SYS_RT_SIGSUSPEND
364
365/*
366 * "Conditional" syscalls
367 *
368 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
369 * but it doesn't work on all toolchains, so we just do it by hand
370 */
371#ifndef cond_syscall
372#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
373#endif 5#endif
374
375#endif /* __KERNEL__ */
376#endif /* __ASM_SH_UNISTD_H */
diff --git a/include/asm-sh/unistd_32.h b/include/asm-sh/unistd_32.h
new file mode 100644
index 000000000000..b182b1cb05fd
--- /dev/null
+++ b/include/asm-sh/unistd_32.h
@@ -0,0 +1,376 @@
1#ifndef __ASM_SH_UNISTD_H
2#define __ASM_SH_UNISTD_H
3
4/*
5 * Copyright (C) 1999 Niibe Yutaka
6 */
7
8/*
9 * This file contains the system call numbers.
10 */
11
12#define __NR_restart_syscall 0
13#define __NR_exit 1
14#define __NR_fork 2
15#define __NR_read 3
16#define __NR_write 4
17#define __NR_open 5
18#define __NR_close 6
19#define __NR_waitpid 7
20#define __NR_creat 8
21#define __NR_link 9
22#define __NR_unlink 10
23#define __NR_execve 11
24#define __NR_chdir 12
25#define __NR_time 13
26#define __NR_mknod 14
27#define __NR_chmod 15
28#define __NR_lchown 16
29#define __NR_break 17
30#define __NR_oldstat 18
31#define __NR_lseek 19
32#define __NR_getpid 20
33#define __NR_mount 21
34#define __NR_umount 22
35#define __NR_setuid 23
36#define __NR_getuid 24
37#define __NR_stime 25
38#define __NR_ptrace 26
39#define __NR_alarm 27
40#define __NR_oldfstat 28
41#define __NR_pause 29
42#define __NR_utime 30
43#define __NR_stty 31
44#define __NR_gtty 32
45#define __NR_access 33
46#define __NR_nice 34
47#define __NR_ftime 35
48#define __NR_sync 36
49#define __NR_kill 37
50#define __NR_rename 38
51#define __NR_mkdir 39
52#define __NR_rmdir 40
53#define __NR_dup 41
54#define __NR_pipe 42
55#define __NR_times 43
56#define __NR_prof 44
57#define __NR_brk 45
58#define __NR_setgid 46
59#define __NR_getgid 47
60#define __NR_signal 48
61#define __NR_geteuid 49
62#define __NR_getegid 50
63#define __NR_acct 51
64#define __NR_umount2 52
65#define __NR_lock 53
66#define __NR_ioctl 54
67#define __NR_fcntl 55
68#define __NR_mpx 56
69#define __NR_setpgid 57
70#define __NR_ulimit 58
71#define __NR_oldolduname 59
72#define __NR_umask 60
73#define __NR_chroot 61
74#define __NR_ustat 62
75#define __NR_dup2 63
76#define __NR_getppid 64
77#define __NR_getpgrp 65
78#define __NR_setsid 66
79#define __NR_sigaction 67
80#define __NR_sgetmask 68
81#define __NR_ssetmask 69
82#define __NR_setreuid 70
83#define __NR_setregid 71
84#define __NR_sigsuspend 72
85#define __NR_sigpending 73
86#define __NR_sethostname 74
87#define __NR_setrlimit 75
88#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
89#define __NR_getrusage 77
90#define __NR_gettimeofday 78
91#define __NR_settimeofday 79
92#define __NR_getgroups 80
93#define __NR_setgroups 81
94#define __NR_select 82
95#define __NR_symlink 83
96#define __NR_oldlstat 84
97#define __NR_readlink 85
98#define __NR_uselib 86
99#define __NR_swapon 87
100#define __NR_reboot 88
101#define __NR_readdir 89
102#define __NR_mmap 90
103#define __NR_munmap 91
104#define __NR_truncate 92
105#define __NR_ftruncate 93
106#define __NR_fchmod 94
107#define __NR_fchown 95
108#define __NR_getpriority 96
109#define __NR_setpriority 97
110#define __NR_profil 98
111#define __NR_statfs 99
112#define __NR_fstatfs 100
113#define __NR_ioperm 101
114#define __NR_socketcall 102
115#define __NR_syslog 103
116#define __NR_setitimer 104
117#define __NR_getitimer 105
118#define __NR_stat 106
119#define __NR_lstat 107
120#define __NR_fstat 108
121#define __NR_olduname 109
122#define __NR_iopl 110
123#define __NR_vhangup 111
124#define __NR_idle 112
125#define __NR_vm86old 113
126#define __NR_wait4 114
127#define __NR_swapoff 115
128#define __NR_sysinfo 116
129#define __NR_ipc 117
130#define __NR_fsync 118
131#define __NR_sigreturn 119
132#define __NR_clone 120
133#define __NR_setdomainname 121
134#define __NR_uname 122
135#define __NR_modify_ldt 123
136#define __NR_adjtimex 124
137#define __NR_mprotect 125
138#define __NR_sigprocmask 126
139#define __NR_create_module 127
140#define __NR_init_module 128
141#define __NR_delete_module 129
142#define __NR_get_kernel_syms 130
143#define __NR_quotactl 131
144#define __NR_getpgid 132
145#define __NR_fchdir 133
146#define __NR_bdflush 134
147#define __NR_sysfs 135
148#define __NR_personality 136
149#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
150#define __NR_setfsuid 138
151#define __NR_setfsgid 139
152#define __NR__llseek 140
153#define __NR_getdents 141
154#define __NR__newselect 142
155#define __NR_flock 143
156#define __NR_msync 144
157#define __NR_readv 145
158#define __NR_writev 146
159#define __NR_getsid 147
160#define __NR_fdatasync 148
161#define __NR__sysctl 149
162#define __NR_mlock 150
163#define __NR_munlock 151
164#define __NR_mlockall 152
165#define __NR_munlockall 153
166#define __NR_sched_setparam 154
167#define __NR_sched_getparam 155
168#define __NR_sched_setscheduler 156
169#define __NR_sched_getscheduler 157
170#define __NR_sched_yield 158
171#define __NR_sched_get_priority_max 159
172#define __NR_sched_get_priority_min 160
173#define __NR_sched_rr_get_interval 161
174#define __NR_nanosleep 162
175#define __NR_mremap 163
176#define __NR_setresuid 164
177#define __NR_getresuid 165
178#define __NR_vm86 166
179#define __NR_query_module 167
180#define __NR_poll 168
181#define __NR_nfsservctl 169
182#define __NR_setresgid 170
183#define __NR_getresgid 171
184#define __NR_prctl 172
185#define __NR_rt_sigreturn 173
186#define __NR_rt_sigaction 174
187#define __NR_rt_sigprocmask 175
188#define __NR_rt_sigpending 176
189#define __NR_rt_sigtimedwait 177
190#define __NR_rt_sigqueueinfo 178
191#define __NR_rt_sigsuspend 179
192#define __NR_pread64 180
193#define __NR_pwrite64 181
194#define __NR_chown 182
195#define __NR_getcwd 183
196#define __NR_capget 184
197#define __NR_capset 185
198#define __NR_sigaltstack 186
199#define __NR_sendfile 187
200#define __NR_streams1 188 /* some people actually want it */
201#define __NR_streams2 189 /* some people actually want it */
202#define __NR_vfork 190
203#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
204#define __NR_mmap2 192
205#define __NR_truncate64 193
206#define __NR_ftruncate64 194
207#define __NR_stat64 195
208#define __NR_lstat64 196
209#define __NR_fstat64 197
210#define __NR_lchown32 198
211#define __NR_getuid32 199
212#define __NR_getgid32 200
213#define __NR_geteuid32 201
214#define __NR_getegid32 202
215#define __NR_setreuid32 203
216#define __NR_setregid32 204
217#define __NR_getgroups32 205
218#define __NR_setgroups32 206
219#define __NR_fchown32 207
220#define __NR_setresuid32 208
221#define __NR_getresuid32 209
222#define __NR_setresgid32 210
223#define __NR_getresgid32 211
224#define __NR_chown32 212
225#define __NR_setuid32 213
226#define __NR_setgid32 214
227#define __NR_setfsuid32 215
228#define __NR_setfsgid32 216
229#define __NR_pivot_root 217
230#define __NR_mincore 218
231#define __NR_madvise 219
232#define __NR_getdents64 220
233#define __NR_fcntl64 221
234/* 223 is unused */
235#define __NR_gettid 224
236#define __NR_readahead 225
237#define __NR_setxattr 226
238#define __NR_lsetxattr 227
239#define __NR_fsetxattr 228
240#define __NR_getxattr 229
241#define __NR_lgetxattr 230
242#define __NR_fgetxattr 231
243#define __NR_listxattr 232
244#define __NR_llistxattr 233
245#define __NR_flistxattr 234
246#define __NR_removexattr 235
247#define __NR_lremovexattr 236
248#define __NR_fremovexattr 237
249#define __NR_tkill 238
250#define __NR_sendfile64 239
251#define __NR_futex 240
252#define __NR_sched_setaffinity 241
253#define __NR_sched_getaffinity 242
254#define __NR_set_thread_area 243
255#define __NR_get_thread_area 244
256#define __NR_io_setup 245
257#define __NR_io_destroy 246
258#define __NR_io_getevents 247
259#define __NR_io_submit 248
260#define __NR_io_cancel 249
261#define __NR_fadvise64 250
262
263#define __NR_exit_group 252
264#define __NR_lookup_dcookie 253
265#define __NR_epoll_create 254
266#define __NR_epoll_ctl 255
267#define __NR_epoll_wait 256
268#define __NR_remap_file_pages 257
269#define __NR_set_tid_address 258
270#define __NR_timer_create 259
271#define __NR_timer_settime (__NR_timer_create+1)
272#define __NR_timer_gettime (__NR_timer_create+2)
273#define __NR_timer_getoverrun (__NR_timer_create+3)
274#define __NR_timer_delete (__NR_timer_create+4)
275#define __NR_clock_settime (__NR_timer_create+5)
276#define __NR_clock_gettime (__NR_timer_create+6)
277#define __NR_clock_getres (__NR_timer_create+7)
278#define __NR_clock_nanosleep (__NR_timer_create+8)
279#define __NR_statfs64 268
280#define __NR_fstatfs64 269
281#define __NR_tgkill 270
282#define __NR_utimes 271
283#define __NR_fadvise64_64 272
284#define __NR_vserver 273
285#define __NR_mbind 274
286#define __NR_get_mempolicy 275
287#define __NR_set_mempolicy 276
288#define __NR_mq_open 277
289#define __NR_mq_unlink (__NR_mq_open+1)
290#define __NR_mq_timedsend (__NR_mq_open+2)
291#define __NR_mq_timedreceive (__NR_mq_open+3)
292#define __NR_mq_notify (__NR_mq_open+4)
293#define __NR_mq_getsetattr (__NR_mq_open+5)
294#define __NR_kexec_load 283
295#define __NR_waitid 284
296#define __NR_add_key 285
297#define __NR_request_key 286
298#define __NR_keyctl 287
299#define __NR_ioprio_set 288
300#define __NR_ioprio_get 289
301#define __NR_inotify_init 290
302#define __NR_inotify_add_watch 291
303#define __NR_inotify_rm_watch 292
304/* 293 is unused */
305#define __NR_migrate_pages 294
306#define __NR_openat 295
307#define __NR_mkdirat 296
308#define __NR_mknodat 297
309#define __NR_fchownat 298
310#define __NR_futimesat 299
311#define __NR_fstatat64 300
312#define __NR_unlinkat 301
313#define __NR_renameat 302
314#define __NR_linkat 303
315#define __NR_symlinkat 304
316#define __NR_readlinkat 305
317#define __NR_fchmodat 306
318#define __NR_faccessat 307
319#define __NR_pselect6 308
320#define __NR_ppoll 309
321#define __NR_unshare 310
322#define __NR_set_robust_list 311
323#define __NR_get_robust_list 312
324#define __NR_splice 313
325#define __NR_sync_file_range 314
326#define __NR_tee 315
327#define __NR_vmsplice 316
328#define __NR_move_pages 317
329#define __NR_getcpu 318
330#define __NR_epoll_pwait 319
331#define __NR_utimensat 320
332#define __NR_signalfd 321
333#define __NR_timerfd 322
334#define __NR_eventfd 323
335#define __NR_fallocate 324
336
337#define NR_syscalls 325
338
339#ifdef __KERNEL__
340
341#define __ARCH_WANT_IPC_PARSE_VERSION
342#define __ARCH_WANT_OLD_READDIR
343#define __ARCH_WANT_OLD_STAT
344#define __ARCH_WANT_STAT64
345#define __ARCH_WANT_SYS_ALARM
346#define __ARCH_WANT_SYS_GETHOSTNAME
347#define __ARCH_WANT_SYS_PAUSE
348#define __ARCH_WANT_SYS_SGETMASK
349#define __ARCH_WANT_SYS_SIGNAL
350#define __ARCH_WANT_SYS_TIME
351#define __ARCH_WANT_SYS_UTIME
352#define __ARCH_WANT_SYS_WAITPID
353#define __ARCH_WANT_SYS_SOCKETCALL
354#define __ARCH_WANT_SYS_FADVISE64
355#define __ARCH_WANT_SYS_GETPGRP
356#define __ARCH_WANT_SYS_LLSEEK
357#define __ARCH_WANT_SYS_NICE
358#define __ARCH_WANT_SYS_OLD_GETRLIMIT
359#define __ARCH_WANT_SYS_OLDUMOUNT
360#define __ARCH_WANT_SYS_SIGPENDING
361#define __ARCH_WANT_SYS_SIGPROCMASK
362#define __ARCH_WANT_SYS_RT_SIGACTION
363#define __ARCH_WANT_SYS_RT_SIGSUSPEND
364
365/*
366 * "Conditional" syscalls
367 *
368 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
369 * but it doesn't work on all toolchains, so we just do it by hand
370 */
371#ifndef cond_syscall
372#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
373#endif
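/*
 * Typical use is in kernel/sys_ni.c, where optional syscalls are weakly
 * aliased to sys_ni_syscall so unconfigured entries return -ENOSYS, e.g.:
 *
 *	cond_syscall(sys_nfsservctl);
 */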
374
375#endif /* __KERNEL__ */
376#endif /* __ASM_SH_UNISTD_H */
diff --git a/include/asm-sh/unistd_64.h b/include/asm-sh/unistd_64.h
new file mode 100644
index 000000000000..944511882cac
--- /dev/null
+++ b/include/asm-sh/unistd_64.h
@@ -0,0 +1,415 @@
1#ifndef __ASM_SH_UNISTD_64_H
2#define __ASM_SH_UNISTD_64_H
3
4/*
5 * include/asm-sh/unistd_64.h
6 *
7 * This file contains the system call numbers.
8 *
9 * Copyright (C) 2000, 2001 Paolo Alberelli
10 * Copyright (C) 2003 - 2007 Paul Mundt
11 * Copyright (C) 2004 Sean McGoogan
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#define __NR_restart_syscall 0
18#define __NR_exit 1
19#define __NR_fork 2
20#define __NR_read 3
21#define __NR_write 4
22#define __NR_open 5
23#define __NR_close 6
24#define __NR_waitpid 7
25#define __NR_creat 8
26#define __NR_link 9
27#define __NR_unlink 10
28#define __NR_execve 11
29#define __NR_chdir 12
30#define __NR_time 13
31#define __NR_mknod 14
32#define __NR_chmod 15
33#define __NR_lchown 16
34#define __NR_break 17
35#define __NR_oldstat 18
36#define __NR_lseek 19
37#define __NR_getpid 20
38#define __NR_mount 21
39#define __NR_umount 22
40#define __NR_setuid 23
41#define __NR_getuid 24
42#define __NR_stime 25
43#define __NR_ptrace 26
44#define __NR_alarm 27
45#define __NR_oldfstat 28
46#define __NR_pause 29
47#define __NR_utime 30
48#define __NR_stty 31
49#define __NR_gtty 32
50#define __NR_access 33
51#define __NR_nice 34
52#define __NR_ftime 35
53#define __NR_sync 36
54#define __NR_kill 37
55#define __NR_rename 38
56#define __NR_mkdir 39
57#define __NR_rmdir 40
58#define __NR_dup 41
59#define __NR_pipe 42
60#define __NR_times 43
61#define __NR_prof 44
62#define __NR_brk 45
63#define __NR_setgid 46
64#define __NR_getgid 47
65#define __NR_signal 48
66#define __NR_geteuid 49
67#define __NR_getegid 50
68#define __NR_acct 51
69#define __NR_umount2 52
70#define __NR_lock 53
71#define __NR_ioctl 54
72#define __NR_fcntl 55
73#define __NR_mpx 56
74#define __NR_setpgid 57
75#define __NR_ulimit 58
76#define __NR_oldolduname 59
77#define __NR_umask 60
78#define __NR_chroot 61
79#define __NR_ustat 62
80#define __NR_dup2 63
81#define __NR_getppid 64
82#define __NR_getpgrp 65
83#define __NR_setsid 66
84#define __NR_sigaction 67
85#define __NR_sgetmask 68
86#define __NR_ssetmask 69
87#define __NR_setreuid 70
88#define __NR_setregid 71
89#define __NR_sigsuspend 72
90#define __NR_sigpending 73
91#define __NR_sethostname 74
92#define __NR_setrlimit 75
93#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
94#define __NR_getrusage 77
95#define __NR_gettimeofday 78
96#define __NR_settimeofday 79
97#define __NR_getgroups 80
98#define __NR_setgroups 81
99#define __NR_select 82
100#define __NR_symlink 83
101#define __NR_oldlstat 84
102#define __NR_readlink 85
103#define __NR_uselib 86
104#define __NR_swapon 87
105#define __NR_reboot 88
106#define __NR_readdir 89
107#define __NR_mmap 90
108#define __NR_munmap 91
109#define __NR_truncate 92
110#define __NR_ftruncate 93
111#define __NR_fchmod 94
112#define __NR_fchown 95
113#define __NR_getpriority 96
114#define __NR_setpriority 97
115#define __NR_profil 98
116#define __NR_statfs 99
117#define __NR_fstatfs 100
118#define __NR_ioperm 101
119#define __NR_socketcall 102 /* old implementation of the socket system call */
120#define __NR_syslog 103
121#define __NR_setitimer 104
122#define __NR_getitimer 105
123#define __NR_stat 106
124#define __NR_lstat 107
125#define __NR_fstat 108
126#define __NR_olduname 109
127#define __NR_iopl 110
128#define __NR_vhangup 111
129#define __NR_idle 112
130#define __NR_vm86old 113
131#define __NR_wait4 114
132#define __NR_swapoff 115
133#define __NR_sysinfo 116
134#define __NR_ipc 117
135#define __NR_fsync 118
136#define __NR_sigreturn 119
137#define __NR_clone 120
138#define __NR_setdomainname 121
139#define __NR_uname 122
140#define __NR_modify_ldt 123
141#define __NR_adjtimex 124
142#define __NR_mprotect 125
143#define __NR_sigprocmask 126
144#define __NR_create_module 127
145#define __NR_init_module 128
146#define __NR_delete_module 129
147#define __NR_get_kernel_syms 130
148#define __NR_quotactl 131
149#define __NR_getpgid 132
150#define __NR_fchdir 133
151#define __NR_bdflush 134
152#define __NR_sysfs 135
153#define __NR_personality 136
154#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
155#define __NR_setfsuid 138
156#define __NR_setfsgid 139
157#define __NR__llseek 140
158#define __NR_getdents 141
159#define __NR__newselect 142
160#define __NR_flock 143
161#define __NR_msync 144
162#define __NR_readv 145
163#define __NR_writev 146
164#define __NR_getsid 147
165#define __NR_fdatasync 148
166#define __NR__sysctl 149
167#define __NR_mlock 150
168#define __NR_munlock 151
169#define __NR_mlockall 152
170#define __NR_munlockall 153
171#define __NR_sched_setparam 154
172#define __NR_sched_getparam 155
173#define __NR_sched_setscheduler 156
174#define __NR_sched_getscheduler 157
175#define __NR_sched_yield 158
176#define __NR_sched_get_priority_max 159
177#define __NR_sched_get_priority_min 160
178#define __NR_sched_rr_get_interval 161
179#define __NR_nanosleep 162
180#define __NR_mremap 163
181#define __NR_setresuid 164
182#define __NR_getresuid 165
183#define __NR_vm86 166
184#define __NR_query_module 167
185#define __NR_poll 168
186#define __NR_nfsservctl 169
187#define __NR_setresgid 170
188#define __NR_getresgid 171
189#define __NR_prctl 172
190#define __NR_rt_sigreturn 173
191#define __NR_rt_sigaction 174
192#define __NR_rt_sigprocmask 175
193#define __NR_rt_sigpending 176
194#define __NR_rt_sigtimedwait 177
195#define __NR_rt_sigqueueinfo 178
196#define __NR_rt_sigsuspend 179
197#define __NR_pread64 180
198#define __NR_pwrite64 181
199#define __NR_chown 182
200#define __NR_getcwd 183
201#define __NR_capget 184
202#define __NR_capset 185
203#define __NR_sigaltstack 186
204#define __NR_sendfile 187
205#define __NR_streams1 188 /* some people actually want it */
206#define __NR_streams2 189 /* some people actually want it */
207#define __NR_vfork 190
208#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
209#define __NR_mmap2 192
210#define __NR_truncate64 193
211#define __NR_ftruncate64 194
212#define __NR_stat64 195
213#define __NR_lstat64 196
214#define __NR_fstat64 197
215#define __NR_lchown32 198
216#define __NR_getuid32 199
217#define __NR_getgid32 200
218#define __NR_geteuid32 201
219#define __NR_getegid32 202
220#define __NR_setreuid32 203
221#define __NR_setregid32 204
222#define __NR_getgroups32 205
223#define __NR_setgroups32 206
224#define __NR_fchown32 207
225#define __NR_setresuid32 208
226#define __NR_getresuid32 209
227#define __NR_setresgid32 210
228#define __NR_getresgid32 211
229#define __NR_chown32 212
230#define __NR_setuid32 213
231#define __NR_setgid32 214
232#define __NR_setfsuid32 215
233#define __NR_setfsgid32 216
234#define __NR_pivot_root 217
235#define __NR_mincore 218
236#define __NR_madvise 219
237
238/* Non-multiplexed socket family */
239#define __NR_socket 220
240#define __NR_bind 221
241#define __NR_connect 222
242#define __NR_listen 223
243#define __NR_accept 224
244#define __NR_getsockname 225
245#define __NR_getpeername 226
246#define __NR_socketpair 227
247#define __NR_send 228
248#define __NR_sendto 229
249#define __NR_recv 230
250#define __NR_recvfrom 231
251#define __NR_shutdown 232
252#define __NR_setsockopt 233
253#define __NR_getsockopt 234
254#define __NR_sendmsg 235
255#define __NR_recvmsg 236
256
257/* Non-multiplexed IPC family */
258#define __NR_semop 237
259#define __NR_semget 238
260#define __NR_semctl 239
261#define __NR_msgsnd 240
262#define __NR_msgrcv 241
263#define __NR_msgget 242
264#define __NR_msgctl 243
265#if 0
266#define __NR_shmatcall 244
267#endif
268#define __NR_shmdt 245
269#define __NR_shmget 246
270#define __NR_shmctl 247
271
272#define __NR_getdents64 248
273#define __NR_fcntl64 249
274/* 250 and 251 are unused */
275#define __NR_gettid 252
276#define __NR_readahead 253
277#define __NR_setxattr 254
278#define __NR_lsetxattr 255
279#define __NR_fsetxattr 256
280#define __NR_getxattr 257
281#define __NR_lgetxattr 258
282#define __NR_fgetxattr 259
283#define __NR_listxattr 260
284#define __NR_llistxattr 261
285#define __NR_flistxattr 262
286#define __NR_removexattr 263
287#define __NR_lremovexattr 264
288#define __NR_fremovexattr 265
289#define __NR_tkill 266
290#define __NR_sendfile64 267
291#define __NR_futex 268
292#define __NR_sched_setaffinity 269
293#define __NR_sched_getaffinity 270
294#define __NR_set_thread_area 271
295#define __NR_get_thread_area 272
296#define __NR_io_setup 273
297#define __NR_io_destroy 274
298#define __NR_io_getevents 275
299#define __NR_io_submit 276
300#define __NR_io_cancel 277
301#define __NR_fadvise64 278
302#define __NR_exit_group 280
303
304#define __NR_lookup_dcookie 281
305#define __NR_epoll_create 282
306#define __NR_epoll_ctl 283
307#define __NR_epoll_wait 284
308#define __NR_remap_file_pages 285
309#define __NR_set_tid_address 286
310#define __NR_timer_create 287
311#define __NR_timer_settime (__NR_timer_create+1)
312#define __NR_timer_gettime (__NR_timer_create+2)
313#define __NR_timer_getoverrun (__NR_timer_create+3)
314#define __NR_timer_delete (__NR_timer_create+4)
315#define __NR_clock_settime (__NR_timer_create+5)
316#define __NR_clock_gettime (__NR_timer_create+6)
317#define __NR_clock_getres (__NR_timer_create+7)
318#define __NR_clock_nanosleep (__NR_timer_create+8)
319#define __NR_statfs64 296
320#define __NR_fstatfs64 297
321#define __NR_tgkill 298
322#define __NR_utimes 299
323#define __NR_fadvise64_64 300
324#define __NR_vserver 301
325#define __NR_mbind 302
326#define __NR_get_mempolicy 303
327#define __NR_set_mempolicy 304
328#define __NR_mq_open 305
329#define __NR_mq_unlink (__NR_mq_open+1)
330#define __NR_mq_timedsend (__NR_mq_open+2)
331#define __NR_mq_timedreceive (__NR_mq_open+3)
332#define __NR_mq_notify (__NR_mq_open+4)
333#define __NR_mq_getsetattr (__NR_mq_open+5)
334#define __NR_kexec_load 311
335#define __NR_waitid 312
336#define __NR_add_key 313
337#define __NR_request_key 314
338#define __NR_keyctl 315
339#define __NR_ioprio_set 316
340#define __NR_ioprio_get 317
341#define __NR_inotify_init 318
342#define __NR_inotify_add_watch 319
343#define __NR_inotify_rm_watch 320
344/* 321 is unused */
345#define __NR_migrate_pages 322
346#define __NR_openat 323
347#define __NR_mkdirat 324
348#define __NR_mknodat 325
349#define __NR_fchownat 326
350#define __NR_futimesat 327
351#define __NR_fstatat64 328
352#define __NR_unlinkat 329
353#define __NR_renameat 330
354#define __NR_linkat 331
355#define __NR_symlinkat 332
356#define __NR_readlinkat 333
357#define __NR_fchmodat 334
358#define __NR_faccessat 335
359#define __NR_pselect6 336
360#define __NR_ppoll 337
361#define __NR_unshare 338
362#define __NR_set_robust_list 339
363#define __NR_get_robust_list 340
364#define __NR_splice 341
365#define __NR_sync_file_range 342
366#define __NR_tee 343
367#define __NR_vmsplice 344
368#define __NR_move_pages 345
369#define __NR_getcpu 346
370#define __NR_epoll_pwait 347
371#define __NR_utimensat 348
372#define __NR_signalfd 349
373#define __NR_timerfd 350
374#define __NR_eventfd 351
375#define __NR_fallocate 352
376
377#ifdef __KERNEL__
378
379#define NR_syscalls 353
380
381#define __ARCH_WANT_IPC_PARSE_VERSION
382#define __ARCH_WANT_OLD_READDIR
383#define __ARCH_WANT_OLD_STAT
384#define __ARCH_WANT_STAT64
385#define __ARCH_WANT_SYS_ALARM
386#define __ARCH_WANT_SYS_GETHOSTNAME
387#define __ARCH_WANT_SYS_PAUSE
388#define __ARCH_WANT_SYS_SGETMASK
389#define __ARCH_WANT_SYS_SIGNAL
390#define __ARCH_WANT_SYS_TIME
391#define __ARCH_WANT_SYS_UTIME
392#define __ARCH_WANT_SYS_WAITPID
393#define __ARCH_WANT_SYS_SOCKETCALL
394#define __ARCH_WANT_SYS_FADVISE64
395#define __ARCH_WANT_SYS_GETPGRP
396#define __ARCH_WANT_SYS_LLSEEK
397#define __ARCH_WANT_SYS_NICE
398#define __ARCH_WANT_SYS_OLD_GETRLIMIT
399#define __ARCH_WANT_SYS_OLDUMOUNT
400#define __ARCH_WANT_SYS_SIGPENDING
401#define __ARCH_WANT_SYS_SIGPROCMASK
402#define __ARCH_WANT_SYS_RT_SIGACTION
403
404/*
405 * "Conditional" syscalls
406 *
407 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
408 * but it doesn't work on all toolchains, so we just do it by hand
409 */
410#ifndef cond_syscall
411#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
412#endif
413
414#endif /* __KERNEL__ */
415#endif /* __ASM_SH_UNISTD_64_H */
diff --git a/include/asm-sh/user.h b/include/asm-sh/user.h
index d1b8511d9d9f..1a4f43c75126 100644
--- a/include/asm-sh/user.h
+++ b/include/asm-sh/user.h
@@ -27,12 +27,19 @@
27 * to write an integer number of pages. 27 * to write an integer number of pages.
28 */ 28 */
29 29
30#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
31struct user_fpu_struct {
32 unsigned long fp_regs[32];
33 unsigned int fpscr;
34};
35#else
30struct user_fpu_struct { 36struct user_fpu_struct {
31 unsigned long fp_regs[16]; 37 unsigned long fp_regs[16];
32 unsigned long xfp_regs[16]; 38 unsigned long xfp_regs[16];
33 unsigned long fpscr; 39 unsigned long fpscr;
34 unsigned long fpul; 40 unsigned long fpul;
35}; 41};
42#endif
36 43
37struct user { 44struct user {
38 struct pt_regs regs; /* entire machine state */ 45 struct pt_regs regs; /* entire machine state */
diff --git a/include/asm-sh/voyagergx.h b/include/asm-sh/voyagergx.h
deleted file mode 100644
index d825596562df..000000000000
--- a/include/asm-sh/voyagergx.h
+++ /dev/null
@@ -1,341 +0,0 @@
1/* -------------------------------------------------------------------- */
2/* voyagergx.h */
3/* -------------------------------------------------------------------- */
4/* This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17
18 Copyright 2003 (c) Lineo uSolutions,Inc.
19*/
20/* -------------------------------------------------------------------- */
21
22#ifndef _VOYAGER_GX_REG_H
23#define _VOYAGER_GX_REG_H
24
25#define VOYAGER_BASE 0xb3e00000
26#define VOYAGER_USBH_BASE (0x40000 + VOYAGER_BASE)
27#define VOYAGER_UART_BASE (0x30000 + VOYAGER_BASE)
28#define VOYAGER_AC97_BASE (0xa0000 + VOYAGER_BASE)
29
30#define VOYAGER_IRQ_NUM 26
31#define VOYAGER_IRQ_BASE 200
32
33#define IRQ_SM501_UP (VOYAGER_IRQ_BASE + 0)
34#define IRQ_SM501_G54 (VOYAGER_IRQ_BASE + 1)
35#define IRQ_SM501_G53 (VOYAGER_IRQ_BASE + 2)
36#define IRQ_SM501_G52 (VOYAGER_IRQ_BASE + 3)
37#define IRQ_SM501_G51 (VOYAGER_IRQ_BASE + 4)
38#define IRQ_SM501_G50 (VOYAGER_IRQ_BASE + 5)
39#define IRQ_SM501_G49 (VOYAGER_IRQ_BASE + 6)
40#define IRQ_SM501_G48 (VOYAGER_IRQ_BASE + 7)
41#define IRQ_SM501_I2C (VOYAGER_IRQ_BASE + 8)
42#define IRQ_SM501_PW (VOYAGER_IRQ_BASE + 9)
43#define IRQ_SM501_DMA (VOYAGER_IRQ_BASE + 10)
44#define IRQ_SM501_PCI (VOYAGER_IRQ_BASE + 11)
45#define IRQ_SM501_I2S (VOYAGER_IRQ_BASE + 12)
46#define IRQ_SM501_AC (VOYAGER_IRQ_BASE + 13)
47#define IRQ_SM501_US (VOYAGER_IRQ_BASE + 14)
48#define IRQ_SM501_U1 (VOYAGER_IRQ_BASE + 15)
49#define IRQ_SM501_U0 (VOYAGER_IRQ_BASE + 16)
50#define IRQ_SM501_CV (VOYAGER_IRQ_BASE + 17)
51#define IRQ_SM501_MC (VOYAGER_IRQ_BASE + 18)
52#define IRQ_SM501_S1 (VOYAGER_IRQ_BASE + 19)
53#define IRQ_SM501_S0 (VOYAGER_IRQ_BASE + 20)
54#define IRQ_SM501_UH (VOYAGER_IRQ_BASE + 21)
55#define IRQ_SM501_2D (VOYAGER_IRQ_BASE + 22)
56#define IRQ_SM501_ZD (VOYAGER_IRQ_BASE + 23)
57#define IRQ_SM501_PV (VOYAGER_IRQ_BASE + 24)
58#define IRQ_SM501_CI (VOYAGER_IRQ_BASE + 25)
59
60/* ----- MISC control register ------------------------------- */
61#define MISC_CTRL (0x000004 + VOYAGER_BASE)
62#define MISC_CTRL_USBCLK_48 (3 << 28)
63#define MISC_CTRL_USBCLK_96 (2 << 28)
64#define MISC_CTRL_USBCLK_CRYSTAL (1 << 28)
65
66/* ----- GPIO[31:0] register --------------------------------- */
67#define GPIO_MUX_LOW (0x000008 + VOYAGER_BASE)
68#define GPIO_MUX_LOW_AC97 0x1F000000
69#define GPIO_MUX_LOW_8051 0x0000ffff
70#define GPIO_MUX_LOW_PWM (1 << 29)
71
72/* ----- GPIO[63:32] register --------------------------------- */
73#define GPIO_MUX_HIGH (0x00000C + VOYAGER_BASE)
74
75/* ----- DRAM control register -------------------------------- */
76#define DRAM_CTRL (0x000010 + VOYAGER_BASE)
77#define DRAM_CTRL_EMBEDDED (1 << 31)
78#define DRAM_CTRL_CPU_BURST_1 (0 << 28)
79#define DRAM_CTRL_CPU_BURST_2 (1 << 28)
80#define DRAM_CTRL_CPU_BURST_4 (2 << 28)
81#define DRAM_CTRL_CPU_BURST_8 (3 << 28)
82#define DRAM_CTRL_CPU_CAS_LATENCY (1 << 27)
83#define DRAM_CTRL_CPU_SIZE_2 (0 << 24)
84#define DRAM_CTRL_CPU_SIZE_4 (1 << 24)
85#define DRAM_CTRL_CPU_SIZE_64 (4 << 24)
86#define DRAM_CTRL_CPU_SIZE_32 (5 << 24)
87#define DRAM_CTRL_CPU_SIZE_16 (6 << 24)
88#define DRAM_CTRL_CPU_SIZE_8 (7 << 24)
89#define DRAM_CTRL_CPU_COLUMN_SIZE_1024 (0 << 22)
90#define DRAM_CTRL_CPU_COLUMN_SIZE_512 (2 << 22)
91#define DRAM_CTRL_CPU_COLUMN_SIZE_256 (3 << 22)
92#define DRAM_CTRL_CPU_ACTIVE_PRECHARGE (1 << 21)
93#define DRAM_CTRL_CPU_RESET (1 << 20)
94#define DRAM_CTRL_CPU_BANKS (1 << 19)
95#define DRAM_CTRL_CPU_WRITE_PRECHARGE (1 << 18)
96#define DRAM_CTRL_BLOCK_WRITE (1 << 17)
97#define DRAM_CTRL_REFRESH_COMMAND (1 << 16)
98#define DRAM_CTRL_SIZE_4 (0 << 13)
99#define DRAM_CTRL_SIZE_8 (1 << 13)
100#define DRAM_CTRL_SIZE_16 (2 << 13)
101#define DRAM_CTRL_SIZE_32 (3 << 13)
102#define DRAM_CTRL_SIZE_64 (4 << 13)
103#define DRAM_CTRL_SIZE_2 (5 << 13)
104#define DRAM_CTRL_COLUMN_SIZE_256 (0 << 11)
105#define DRAM_CTRL_COLUMN_SIZE_512 (2 << 11)
106#define DRAM_CTRL_COLUMN_SIZE_1024 (3 << 11)
107#define DRAM_CTRL_BLOCK_WRITE_TIME (1 << 10)
108#define DRAM_CTRL_BLOCK_WRITE_PRECHARGE (1 << 9)
109#define DRAM_CTRL_ACTIVE_PRECHARGE (1 << 8)
110#define DRAM_CTRL_RESET (1 << 7)
111#define DRAM_CTRL_REMAIN_ACTIVE (1 << 6)
112#define DRAM_CTRL_BANKS (1 << 1)
113#define DRAM_CTRL_WRITE_PRECHARGE (1 << 0)
114
115/* ----- Arbitration control register -------------------------- */
116#define ARBITRATION_CTRL (0x000014 + VOYAGER_BASE)
117#define ARBITRATION_CTRL_CPUMEM (1 << 29)
118#define ARBITRATION_CTRL_INTMEM (1 << 28)
119#define ARBITRATION_CTRL_USB_OFF (0 << 24)
120#define ARBITRATION_CTRL_USB_PRIORITY_1 (1 << 24)
121#define ARBITRATION_CTRL_USB_PRIORITY_2 (2 << 24)
122#define ARBITRATION_CTRL_USB_PRIORITY_3 (3 << 24)
123#define ARBITRATION_CTRL_USB_PRIORITY_4 (4 << 24)
124#define ARBITRATION_CTRL_USB_PRIORITY_5 (5 << 24)
125#define ARBITRATION_CTRL_USB_PRIORITY_6 (6 << 24)
126#define ARBITRATION_CTRL_USB_PRIORITY_7 (7 << 24)
127#define ARBITRATION_CTRL_PANEL_OFF (0 << 20)
128#define ARBITRATION_CTRL_PANEL_PRIORITY_1 (1 << 20)
129#define ARBITRATION_CTRL_PANEL_PRIORITY_2 (2 << 20)
130#define ARBITRATION_CTRL_PANEL_PRIORITY_3 (3 << 20)
131#define ARBITRATION_CTRL_PANEL_PRIORITY_4 (4 << 20)
132#define ARBITRATION_CTRL_PANEL_PRIORITY_5 (5 << 20)
133#define ARBITRATION_CTRL_PANEL_PRIORITY_6 (6 << 20)
134#define ARBITRATION_CTRL_PANEL_PRIORITY_7 (7 << 20)
135#define ARBITRATION_CTRL_ZVPORT_OFF (0 << 16)
136#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_1 (1 << 16)
137#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_2 (2 << 16)
138#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_3 (3 << 16)
139#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_4 (4 << 16)
140#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_5 (5 << 16)
141#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_6 (6 << 16)
142#define ARBITRATION_CTRL_ZVPORTL_PRIORITY_7 (7 << 16)
143#define ARBITRATION_CTRL_CMD_INTPR_OFF (0 << 12)
144#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_1 (1 << 12)
145#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_2 (2 << 12)
146#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_3 (3 << 12)
147#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_4 (4 << 12)
148#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_5 (5 << 12)
149#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_6 (6 << 12)
150#define ARBITRATION_CTRL_CMD_INTPR_PRIORITY_7 (7 << 12)
151#define ARBITRATION_CTRL_DMA_OFF (0 << 8)
152#define ARBITRATION_CTRL_DMA_PRIORITY_1 (1 << 8)
153#define ARBITRATION_CTRL_DMA_PRIORITY_2 (2 << 8)
154#define ARBITRATION_CTRL_DMA_PRIORITY_3 (3 << 8)
155#define ARBITRATION_CTRL_DMA_PRIORITY_4 (4 << 8)
156#define ARBITRATION_CTRL_DMA_PRIORITY_5 (5 << 8)
157#define ARBITRATION_CTRL_DMA_PRIORITY_6 (6 << 8)
158#define ARBITRATION_CTRL_DMA_PRIORITY_7 (7 << 8)
159#define ARBITRATION_CTRL_VIDEO_OFF (0 << 4)
160#define ARBITRATION_CTRL_VIDEO_PRIORITY_1 (1 << 4)
161#define ARBITRATION_CTRL_VIDEO_PRIORITY_2 (2 << 4)
162#define ARBITRATION_CTRL_VIDEO_PRIORITY_3 (3 << 4)
163#define ARBITRATION_CTRL_VIDEO_PRIORITY_4 (4 << 4)
164#define ARBITRATION_CTRL_VIDEO_PRIORITY_5 (5 << 4)
165#define ARBITRATION_CTRL_VIDEO_PRIORITY_6 (6 << 4)
166#define ARBITRATION_CTRL_VIDEO_PRIORITY_7 (7 << 4)
167#define ARBITRATION_CTRL_CRT_OFF (0 << 0)
168#define ARBITRATION_CTRL_CRT_PRIORITY_1 (1 << 0)
169#define ARBITRATION_CTRL_CRT_PRIORITY_2 (2 << 0)
170#define ARBITRATION_CTRL_CRT_PRIORITY_3 (3 << 0)
171#define ARBITRATION_CTRL_CRT_PRIORITY_4 (4 << 0)
172#define ARBITRATION_CTRL_CRT_PRIORITY_5 (5 << 0)
173#define ARBITRATION_CTRL_CRT_PRIORITY_6 (6 << 0)
174#define ARBITRATION_CTRL_CRT_PRIORITY_7 (7 << 0)
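
/*
 * Illustrative sketch (not part of the original header): composing an
 * arbitration setting from the per-client priority macros above.  The
 * priorities chosen here are arbitrary example values and the helper
 * name is hypothetical; assumes ctrl_outl() from <asm/io.h>.
 */
static inline void voyagergx_set_arbitration_example(void)
{
	ctrl_outl(ARBITRATION_CTRL_CPUMEM |
		  ARBITRATION_CTRL_USB_PRIORITY_2 |
		  ARBITRATION_CTRL_PANEL_PRIORITY_3 |
		  ARBITRATION_CTRL_DMA_PRIORITY_1 |
		  ARBITRATION_CTRL_CRT_PRIORITY_4,
		  ARBITRATION_CTRL);
}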

/* ----- Command list status register -------------------------- */
#define CMD_INTPR_STATUS (0x000024 + VOYAGER_BASE)

/* ----- Interrupt status register ----------------------------- */
#define INT_STATUS (0x00002c + VOYAGER_BASE)
#define INT_STATUS_UH (1 << 6)
#define INT_STATUS_MC (1 << 10)
#define INT_STATUS_U0 (1 << 12)
#define INT_STATUS_U1 (1 << 13)
#define INT_STATUS_AC (1 << 17)

/* ----- Interrupt mask register ------------------------------ */
#define VOYAGER_INT_MASK (0x000030 + VOYAGER_BASE)
#define VOYAGER_INT_MASK_AC (1 << 17)
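
/*
 * Illustrative sketch (not part of the original header): unmasking the
 * AC97 interrupt source in the VoyagerGX interrupt mask register.  The
 * assumption that a set bit enables the source follows the handling in
 * arch/sh/cchips/voyagergx/irq.c, which remains the authoritative code;
 * the helper name is hypothetical.  Assumes ctrl_inl()/ctrl_outl().
 */
static inline void voyagergx_unmask_ac97_irq_example(void)
{
	ctrl_outl(ctrl_inl(VOYAGER_INT_MASK) | VOYAGER_INT_MASK_AC,
		  VOYAGER_INT_MASK);
}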

/* ----- Current Gate register ---------------------------------*/
#define CURRENT_GATE (0x000038 + VOYAGER_BASE)

/* ----- Power mode 0 gate register --------------------------- */
#define POWER_MODE0_GATE (0x000040 + VOYAGER_BASE)
#define POWER_MODE0_GATE_G (1 << 6)
#define POWER_MODE0_GATE_U0 (1 << 7)
#define POWER_MODE0_GATE_U1 (1 << 8)
#define POWER_MODE0_GATE_UH (1 << 11)
#define POWER_MODE0_GATE_AC (1 << 18)

/* ----- Power mode 1 gate register --------------------------- */
#define POWER_MODE1_GATE (0x000048 + VOYAGER_BASE)
#define POWER_MODE1_GATE_G (1 << 6)
#define POWER_MODE1_GATE_U0 (1 << 7)
#define POWER_MODE1_GATE_U1 (1 << 8)
#define POWER_MODE1_GATE_UH (1 << 11)
#define POWER_MODE1_GATE_AC (1 << 18)

/* ----- Power mode 0 clock register -------------------------- */
#define POWER_MODE0_CLOCK (0x000044 + VOYAGER_BASE)

/* ----- Power mode 1 clock register -------------------------- */
#define POWER_MODE1_CLOCK (0x00004C + VOYAGER_BASE)

/* ----- Power mode control register --------------------------- */
#define POWER_MODE_CTRL (0x000054 + VOYAGER_BASE)

/* ----- Miscellaneous Timing register ------------------------ */
#define SYSTEM_DRAM_CTRL (0x000068 + VOYAGER_BASE)

/* ----- PWM register ------------------------------------------*/
#define PWM_0 (0x010020 + VOYAGER_BASE)
#define PWM_0_HC(x) (((x)&0x0fff)<<20)
#define PWM_0_LC(x) (((x)&0x0fff)<<8)
#define PWM_0_CLK_DEV(x) (((x)&0x000f)<<4)
#define PWM_0_EN (1<<0)
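
/*
 * Illustrative sketch (not part of the original header): programming PWM
 * channel 0 from the field macros above.  The high-count, low-count and
 * clock-divider values are arbitrary example numbers and the helper name
 * is hypothetical; assumes ctrl_outl() from <asm/io.h>.
 */
static inline void voyagergx_pwm0_example(void)
{
	ctrl_outl(PWM_0_HC(0x200) | PWM_0_LC(0x100) |
		  PWM_0_CLK_DEV(4) | PWM_0_EN, PWM_0);
}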

/* ----- I2C register ----------------------------------------- */
#define I2C_BYTECOUNT (0x010040 + VOYAGER_BASE)
#define I2C_CONTROL (0x010041 + VOYAGER_BASE)
#define I2C_STATUS (0x010042 + VOYAGER_BASE)
#define I2C_RESET (0x010042 + VOYAGER_BASE)
#define I2C_SADDRESS (0x010043 + VOYAGER_BASE)
#define I2C_DATA (0x010044 + VOYAGER_BASE)

/* ----- Control register bits ----------------------------------------- */
#define I2C_CONTROL_E (1 << 0)
#define I2C_CONTROL_MODE (1 << 1)
#define I2C_CONTROL_STATUS (1 << 2)
#define I2C_CONTROL_INT (1 << 4)
#define I2C_CONTROL_INTACK (1 << 5)
#define I2C_CONTROL_REPEAT (1 << 6)

/* ----- Status register bits ----------------------------------------- */
#define I2C_STATUS_BUSY (1 << 0)
#define I2C_STATUS_ACK (1 << 1)
#define I2C_STATUS_ERROR (1 << 2)
#define I2C_STATUS_COMPLETE (1 << 3)

/* ----- Reset register ---------------------------------------------- */
#define I2C_RESET_ERROR (1 << 2)

/* ----- Transmission frequencies ------------------------------------- */
#define I2C_SADDRESS_SELECT (1 << 0)
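
/*
 * Illustrative sketch (not part of the original header): busy-waiting for
 * an I2C transfer started through I2C_CONTROL to finish.  The busy/error
 * semantics assumed here follow the bit names above but are not taken from
 * the chip documentation, and the helper name is hypothetical.  Assumes
 * ctrl_inb() from <asm/io.h>, since these registers are byte wide.
 */
static inline int voyagergx_i2c_wait_example(void)
{
	while (ctrl_inb(I2C_STATUS) & I2C_STATUS_BUSY)
		;	/* spin until the controller goes idle */

	return (ctrl_inb(I2C_STATUS) & I2C_STATUS_ERROR) ? -1 : 0;
}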

/* ----- Display control register ----------------------------------------- */
#define PANEL_DISPLAY_CTRL (0x080000 + VOYAGER_BASE)
#define PANEL_DISPLAY_CTRL_BIAS (1<<26)
#define PANEL_PAN_CTRL (0x080004 + VOYAGER_BASE)
#define PANEL_COLOR_KEY (0x080008 + VOYAGER_BASE)
#define PANEL_FB_ADDRESS (0x08000C + VOYAGER_BASE)
#define PANEL_FB_WIDTH (0x080010 + VOYAGER_BASE)
#define PANEL_WINDOW_WIDTH (0x080014 + VOYAGER_BASE)
#define PANEL_WINDOW_HEIGHT (0x080018 + VOYAGER_BASE)
#define PANEL_PLANE_TL (0x08001C + VOYAGER_BASE)
#define PANEL_PLANE_BR (0x080020 + VOYAGER_BASE)
#define PANEL_HORIZONTAL_TOTAL (0x080024 + VOYAGER_BASE)
#define PANEL_HORIZONTAL_SYNC (0x080028 + VOYAGER_BASE)
#define PANEL_VERTICAL_TOTAL (0x08002C + VOYAGER_BASE)
#define PANEL_VERTICAL_SYNC (0x080030 + VOYAGER_BASE)
#define PANEL_CURRENT_LINE (0x080034 + VOYAGER_BASE)
#define VIDEO_DISPLAY_CTRL (0x080040 + VOYAGER_BASE)
#define VIDEO_FB_0_ADDRESS (0x080044 + VOYAGER_BASE)
#define VIDEO_FB_WIDTH (0x080048 + VOYAGER_BASE)
#define VIDEO_FB_0_LAST_ADDRESS (0x08004C + VOYAGER_BASE)
#define VIDEO_PLANE_TL (0x080050 + VOYAGER_BASE)
#define VIDEO_PLANE_BR (0x080054 + VOYAGER_BASE)
#define VIDEO_SCALE (0x080058 + VOYAGER_BASE)
#define VIDEO_INITIAL_SCALE (0x08005C + VOYAGER_BASE)
#define VIDEO_YUV_CONSTANTS (0x080060 + VOYAGER_BASE)
#define VIDEO_FB_1_ADDRESS (0x080064 + VOYAGER_BASE)
#define VIDEO_FB_1_LAST_ADDRESS (0x080068 + VOYAGER_BASE)
#define VIDEO_ALPHA_DISPLAY_CTRL (0x080080 + VOYAGER_BASE)
#define VIDEO_ALPHA_FB_ADDRESS (0x080084 + VOYAGER_BASE)
#define VIDEO_ALPHA_FB_WIDTH (0x080088 + VOYAGER_BASE)
#define VIDEO_ALPHA_FB_LAST_ADDRESS (0x08008C + VOYAGER_BASE)
#define VIDEO_ALPHA_PLANE_TL (0x080090 + VOYAGER_BASE)
#define VIDEO_ALPHA_PLANE_BR (0x080094 + VOYAGER_BASE)
#define VIDEO_ALPHA_SCALE (0x080098 + VOYAGER_BASE)
#define VIDEO_ALPHA_INITIAL_SCALE (0x08009C + VOYAGER_BASE)
#define VIDEO_ALPHA_CHROMA_KEY (0x0800A0 + VOYAGER_BASE)
#define PANEL_HWC_ADDRESS (0x0800F0 + VOYAGER_BASE)
#define PANEL_HWC_LOCATION (0x0800F4 + VOYAGER_BASE)
#define PANEL_HWC_COLOR_12 (0x0800F8 + VOYAGER_BASE)
#define PANEL_HWC_COLOR_3 (0x0800FC + VOYAGER_BASE)
#define ALPHA_DISPLAY_CTRL (0x080100 + VOYAGER_BASE)
#define ALPHA_FB_ADDRESS (0x080104 + VOYAGER_BASE)
#define ALPHA_FB_WIDTH (0x080108 + VOYAGER_BASE)
#define ALPHA_PLANE_TL (0x08010C + VOYAGER_BASE)
#define ALPHA_PLANE_BR (0x080110 + VOYAGER_BASE)
#define ALPHA_CHROMA_KEY (0x080114 + VOYAGER_BASE)
#define CRT_DISPLAY_CTRL (0x080200 + VOYAGER_BASE)
#define CRT_FB_ADDRESS (0x080204 + VOYAGER_BASE)
#define CRT_FB_WIDTH (0x080208 + VOYAGER_BASE)
#define CRT_HORIZONTAL_TOTAL (0x08020C + VOYAGER_BASE)
#define CRT_HORIZONTAL_SYNC (0x080210 + VOYAGER_BASE)
#define CRT_VERTICAL_TOTAL (0x080214 + VOYAGER_BASE)
#define CRT_VERTICAL_SYNC (0x080218 + VOYAGER_BASE)
#define CRT_SIGNATURE_ANALYZER (0x08021C + VOYAGER_BASE)
#define CRT_CURRENT_LINE (0x080220 + VOYAGER_BASE)
#define CRT_MONITOR_DETECT (0x080224 + VOYAGER_BASE)
#define CRT_HWC_ADDRESS (0x080230 + VOYAGER_BASE)
#define CRT_HWC_LOCATION (0x080234 + VOYAGER_BASE)
#define CRT_HWC_COLOR_12 (0x080238 + VOYAGER_BASE)
#define CRT_HWC_COLOR_3 (0x08023C + VOYAGER_BASE)
#define CRT_PALETTE_RAM (0x080400 + VOYAGER_BASE)
#define PANEL_PALETTE_RAM (0x080800 + VOYAGER_BASE)
#define VIDEO_PALETTE_RAM (0x080C00 + VOYAGER_BASE)

/* ----- 8051 control register ----------------------------------------- */
#define VOYAGER_8051_BASE (0x000c0000 + VOYAGER_BASE)
#define VOYAGER_8051_RESET (0x000b0000 + VOYAGER_BASE)
#define VOYAGER_8051_SELECT (0x000b0004 + VOYAGER_BASE)
#define VOYAGER_8051_CPU_INT (0x000b000c + VOYAGER_BASE)

/* ----- AC97 control register ----------------------------------------- */
#define AC97_TX_SLOT0 (0x00000000 + VOYAGER_AC97_BASE)
#define AC97_CONTROL_STATUS (0x00000080 + VOYAGER_AC97_BASE)
#define AC97C_READ (1 << 19)
#define AC97C_WD_BIT (1 << 2)
#define AC97C_INDEX_MASK 0x7f

/* arch/sh/cchips/voyagergx/consistent.c */
void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t);
int voyagergx_consistent_free(struct device *, size_t, void *, dma_addr_t);

/* arch/sh/cchips/voyagergx/irq.c */
void setup_voyagergx_irq(void);
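
/*
 * Illustrative sketch (not part of the original header): board setup code
 * typically hooks the cascaded VoyagerGX/SM501 interrupts by calling
 * setup_voyagergx_irq() once during IRQ initialisation.  The wrapper name
 * and call site below are hypothetical examples, not an existing board hook.
 */
static inline void example_board_init_irq(void)
{
	setup_voyagergx_irq();	/* register the VoyagerGX cascade handling */
}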

#endif /* _VOYAGER_GX_REG_H */