commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-sh64
author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
tag        v2.6.12-rc2

    Linux-2.6.12-rc2

    Initial git repository build. I'm not bothering with the full history,
    even though we have it. We can create a separate "historical" git
    archive of that later if we want to, and in the meantime it's about
    3.2GB when imported into git - space that would just make the early
    git days unnecessarily complicated, when we don't have a lot of good
    infrastructure for it.

    Let it rip!
Diffstat (limited to 'include/asm-sh64')
-rw-r--r--  include/asm-sh64/a.out.h | 37
-rw-r--r--  include/asm-sh64/atomic.h | 126
-rw-r--r--  include/asm-sh64/bitops.h | 516
-rw-r--r--  include/asm-sh64/bug.h | 32
-rw-r--r--  include/asm-sh64/bugs.h | 38
-rw-r--r--  include/asm-sh64/byteorder.h | 49
-rw-r--r--  include/asm-sh64/cache.h | 141
-rw-r--r--  include/asm-sh64/cacheflush.h | 48
-rw-r--r--  include/asm-sh64/cayman.h | 20
-rw-r--r--  include/asm-sh64/checksum.h | 95
-rw-r--r--  include/asm-sh64/cpumask.h | 6
-rw-r--r--  include/asm-sh64/cputime.h | 6
-rw-r--r--  include/asm-sh64/current.h | 28
-rw-r--r--  include/asm-sh64/delay.h | 11
-rw-r--r--  include/asm-sh64/div64.h | 6
-rw-r--r--  include/asm-sh64/dma-mapping.h | 162
-rw-r--r--  include/asm-sh64/dma.h | 41
-rw-r--r--  include/asm-sh64/elf.h | 107
-rw-r--r--  include/asm-sh64/errno.h | 6
-rw-r--r--  include/asm-sh64/fcntl.h | 7
-rw-r--r--  include/asm-sh64/hardirq.h | 19
-rw-r--r--  include/asm-sh64/hardware.h | 22
-rw-r--r--  include/asm-sh64/hdreg.h | 6
-rw-r--r--  include/asm-sh64/hw_irq.h | 16
-rw-r--r--  include/asm-sh64/ide.h | 35
-rw-r--r--  include/asm-sh64/io.h | 250
-rw-r--r--  include/asm-sh64/ioctl.h | 83
-rw-r--r--  include/asm-sh64/ioctls.h | 116
-rw-r--r--  include/asm-sh64/ipc.h | 1
-rw-r--r--  include/asm-sh64/ipcbuf.h | 40
-rw-r--r--  include/asm-sh64/irq.h | 149
-rw-r--r--  include/asm-sh64/keyboard.h | 74
-rw-r--r--  include/asm-sh64/kmap_types.h | 7
-rw-r--r--  include/asm-sh64/linkage.h | 7
-rw-r--r--  include/asm-sh64/local.h | 7
-rw-r--r--  include/asm-sh64/mc146818rtc.h | 7
-rw-r--r--  include/asm-sh64/mman.h | 6
-rw-r--r--  include/asm-sh64/mmu.h | 7
-rw-r--r--  include/asm-sh64/mmu_context.h | 209
-rw-r--r--  include/asm-sh64/module.h | 20
-rw-r--r--  include/asm-sh64/msgbuf.h | 42
-rw-r--r--  include/asm-sh64/namei.h | 24
-rw-r--r--  include/asm-sh64/page.h | 137
-rw-r--r--  include/asm-sh64/param.h | 43
-rw-r--r--  include/asm-sh64/pci.h | 110
-rw-r--r--  include/asm-sh64/percpu.h | 6
-rw-r--r--  include/asm-sh64/pgalloc.h | 195
-rw-r--r--  include/asm-sh64/pgtable.h | 508
-rw-r--r--  include/asm-sh64/platform.h | 69
-rw-r--r--  include/asm-sh64/poll.h | 36
-rw-r--r--  include/asm-sh64/posix_types.h | 131
-rw-r--r--  include/asm-sh64/processor.h | 286
-rw-r--r--  include/asm-sh64/ptrace.h | 37
-rw-r--r--  include/asm-sh64/registers.h | 106
-rw-r--r--  include/asm-sh64/resource.h | 6
-rw-r--r--  include/asm-sh64/scatterlist.h | 23
-rw-r--r--  include/asm-sh64/sections.h | 7
-rw-r--r--  include/asm-sh64/segment.h | 6
-rw-r--r--  include/asm-sh64/semaphore-helper.h | 101
-rw-r--r--  include/asm-sh64/semaphore.h | 123
-rw-r--r--  include/asm-sh64/sembuf.h | 36
-rw-r--r--  include/asm-sh64/serial.h | 33
-rw-r--r--  include/asm-sh64/setup.h | 16
-rw-r--r--  include/asm-sh64/shmbuf.h | 53
-rw-r--r--  include/asm-sh64/shmparam.h | 20
-rw-r--r--  include/asm-sh64/sigcontext.h | 30
-rw-r--r--  include/asm-sh64/siginfo.h | 6
-rw-r--r--  include/asm-sh64/signal.h | 185
-rw-r--r--  include/asm-sh64/smp.h | 15
-rw-r--r--  include/asm-sh64/socket.h | 6
-rw-r--r--  include/asm-sh64/sockios.h | 24
-rw-r--r--  include/asm-sh64/spinlock.h | 17
-rw-r--r--  include/asm-sh64/stat.h | 88
-rw-r--r--  include/asm-sh64/statfs.h | 6
-rw-r--r--  include/asm-sh64/string.h | 21
-rw-r--r--  include/asm-sh64/system.h | 195
-rw-r--r--  include/asm-sh64/termbits.h | 6
-rw-r--r--  include/asm-sh64/termios.h | 117
-rw-r--r--  include/asm-sh64/thread_info.h | 87
-rw-r--r--  include/asm-sh64/timex.h | 34
-rw-r--r--  include/asm-sh64/tlb.h | 92
-rw-r--r--  include/asm-sh64/tlbflush.h | 31
-rw-r--r--  include/asm-sh64/topology.h | 6
-rw-r--r--  include/asm-sh64/types.h | 76
-rw-r--r--  include/asm-sh64/uaccess.h | 327
-rw-r--r--  include/asm-sh64/ucontext.h | 23
-rw-r--r--  include/asm-sh64/unaligned.h | 17
-rw-r--r--  include/asm-sh64/unistd.h | 560
-rw-r--r--  include/asm-sh64/user.h | 71
89 files changed, 6757 insertions(+), 0 deletions(-)
diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h
new file mode 100644
index 000000000000..e1995e86b663
--- /dev/null
+++ b/include/asm-sh64/a.out.h
@@ -0,0 +1,37 @@
1#ifndef __ASM_SH64_A_OUT_H
2#define __ASM_SH64_A_OUT_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/a.out.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15struct exec
16{
17 unsigned long a_info; /* Use macros N_MAGIC, etc for access */
18 unsigned a_text; /* length of text, in bytes */
19 unsigned a_data; /* length of data, in bytes */
20 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
21 unsigned a_syms; /* length of symbol table data in file, in bytes */
22 unsigned a_entry; /* start address */
23 unsigned a_trsize; /* length of relocation info for text, in bytes */
24 unsigned a_drsize; /* length of relocation info for data, in bytes */
25};
26
27#define N_TRSIZE(a) ((a).a_trsize)
28#define N_DRSIZE(a) ((a).a_drsize)
29#define N_SYMSIZE(a) ((a).a_syms)
30
31#ifdef __KERNEL__
32
33#define STACK_TOP TASK_SIZE
34
35#endif
36
37#endif /* __ASM_SH64_A_OUT_H */
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
new file mode 100644
index 000000000000..8c3872d3e65f
--- /dev/null
+++ b/include/asm-sh64/atomic.h
@@ -0,0 +1,126 @@
1#ifndef __ASM_SH64_ATOMIC_H
2#define __ASM_SH64_ATOMIC_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/atomic.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 */
15
16/*
17 * Atomic operations that C can't guarantee us. Useful for
18 * resource counting etc..
19 *
20 */
21
22typedef struct { volatile int counter; } atomic_t;
23
24#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
25
26#define atomic_read(v) ((v)->counter)
27#define atomic_set(v,i) ((v)->counter = (i))
28
29#include <asm/system.h>
30
31/*
32 * To get proper branch prediction for the main line, we must branch
33 * forward to code at the end of this object's .text section, then
34 * branch back to restart the operation.
35 */
36
37static __inline__ void atomic_add(int i, atomic_t * v)
38{
39 unsigned long flags;
40
41 local_irq_save(flags);
42 *(long *)v += i;
43 local_irq_restore(flags);
44}
45
46static __inline__ void atomic_sub(int i, atomic_t *v)
47{
48 unsigned long flags;
49
50 local_irq_save(flags);
51 *(long *)v -= i;
52 local_irq_restore(flags);
53}
54
55static __inline__ int atomic_add_return(int i, atomic_t * v)
56{
57 unsigned long temp, flags;
58
59 local_irq_save(flags);
60 temp = *(long *)v;
61 temp += i;
62 *(long *)v = temp;
63 local_irq_restore(flags);
64
65 return temp;
66}
67
68#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
69
70static __inline__ int atomic_sub_return(int i, atomic_t * v)
71{
72 unsigned long temp, flags;
73
74 local_irq_save(flags);
75 temp = *(long *)v;
76 temp -= i;
77 *(long *)v = temp;
78 local_irq_restore(flags);
79
80 return temp;
81}
82
83#define atomic_dec_return(v) atomic_sub_return(1,(v))
84#define atomic_inc_return(v) atomic_add_return(1,(v))
85
86/*
87 * atomic_inc_and_test - increment and test
88 * @v: pointer of type atomic_t
89 *
90 * Atomically increments @v by 1
91 * and returns true if the result is zero, or false for all
92 * other cases.
93 */
94#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
95
96#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
97#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
98
99#define atomic_inc(v) atomic_add(1,(v))
100#define atomic_dec(v) atomic_sub(1,(v))
101
102static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
103{
104 unsigned long flags;
105
106 local_irq_save(flags);
107 *(long *)v &= ~mask;
108 local_irq_restore(flags);
109}
110
111static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
112{
113 unsigned long flags;
114
115 local_irq_save(flags);
116 *(long *)v |= mask;
117 local_irq_restore(flags);
118}
119
120/* Atomic operations are already serializing on SH */
121#define smp_mb__before_atomic_dec() barrier()
122#define smp_mb__after_atomic_dec() barrier()
123#define smp_mb__before_atomic_inc() barrier()
124#define smp_mb__after_atomic_inc() barrier()
125
126#endif /* __ASM_SH64_ATOMIC_H */
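The operations above fall back to masking interrupts rather than using hardware read-modify-write sequences, which is sufficient on this uniprocessor port. As a hypothetical usage sketch (names invented, not part of this header), a simple reference count built on these primitives could look like:

static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_get(void)
{
        atomic_inc(&example_refcount);
}

static int example_put(void)
{
        /* atomic_dec_and_test() returns true once the count reaches zero,
         * i.e. the point at which the owner would release the object. */
        return atomic_dec_and_test(&example_refcount);
}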
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
new file mode 100644
index 000000000000..e1ff63e09227
--- /dev/null
+++ b/include/asm-sh64/bitops.h
@@ -0,0 +1,516 @@
1#ifndef __ASM_SH64_BITOPS_H
2#define __ASM_SH64_BITOPS_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/bitops.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 */
14
15#ifdef __KERNEL__
16#include <linux/compiler.h>
17#include <asm/system.h>
18/* For __swab32 */
19#include <asm/byteorder.h>
20
21static __inline__ void set_bit(int nr, volatile void * addr)
22{
23 int mask;
24 volatile unsigned int *a = addr;
25 unsigned long flags;
26
27 a += nr >> 5;
28 mask = 1 << (nr & 0x1f);
29 local_irq_save(flags);
30 *a |= mask;
31 local_irq_restore(flags);
32}
33
34static inline void __set_bit(int nr, void *addr)
35{
36 int mask;
37 unsigned int *a = addr;
38
39 a += nr >> 5;
40 mask = 1 << (nr & 0x1f);
41 *a |= mask;
42}
43
44/*
45 * clear_bit() doesn't provide any barrier for the compiler.
46 */
47#define smp_mb__before_clear_bit() barrier()
48#define smp_mb__after_clear_bit() barrier()
49static inline void clear_bit(int nr, volatile unsigned long *a)
50{
51 int mask;
52 unsigned long flags;
53
54 a += nr >> 5;
55 mask = 1 << (nr & 0x1f);
56 local_irq_save(flags);
57 *a &= ~mask;
58 local_irq_restore(flags);
59}
60
61static inline void __clear_bit(int nr, volatile unsigned long *a)
62{
63 int mask;
64
65 a += nr >> 5;
66 mask = 1 << (nr & 0x1f);
67 *a &= ~mask;
68}
69
70static __inline__ void change_bit(int nr, volatile void * addr)
71{
72 int mask;
73 volatile unsigned int *a = addr;
74 unsigned long flags;
75
76 a += nr >> 5;
77 mask = 1 << (nr & 0x1f);
78 local_irq_save(flags);
79 *a ^= mask;
80 local_irq_restore(flags);
81}
82
83static __inline__ void __change_bit(int nr, volatile void * addr)
84{
85 int mask;
86 volatile unsigned int *a = addr;
87
88 a += nr >> 5;
89 mask = 1 << (nr & 0x1f);
90 *a ^= mask;
91}
92
93static __inline__ int test_and_set_bit(int nr, volatile void * addr)
94{
95 int mask, retval;
96 volatile unsigned int *a = addr;
97 unsigned long flags;
98
99 a += nr >> 5;
100 mask = 1 << (nr & 0x1f);
101 local_irq_save(flags);
102 retval = (mask & *a) != 0;
103 *a |= mask;
104 local_irq_restore(flags);
105
106 return retval;
107}
108
109static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
110{
111 int mask, retval;
112 volatile unsigned int *a = addr;
113
114 a += nr >> 5;
115 mask = 1 << (nr & 0x1f);
116 retval = (mask & *a) != 0;
117 *a |= mask;
118
119 return retval;
120}
121
122static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
123{
124 int mask, retval;
125 volatile unsigned int *a = addr;
126 unsigned long flags;
127
128 a += nr >> 5;
129 mask = 1 << (nr & 0x1f);
130 local_irq_save(flags);
131 retval = (mask & *a) != 0;
132 *a &= ~mask;
133 local_irq_restore(flags);
134
135 return retval;
136}
137
138static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
139{
140 int mask, retval;
141 volatile unsigned int *a = addr;
142
143 a += nr >> 5;
144 mask = 1 << (nr & 0x1f);
145 retval = (mask & *a) != 0;
146 *a &= ~mask;
147
148 return retval;
149}
150
151static __inline__ int test_and_change_bit(int nr, volatile void * addr)
152{
153 int mask, retval;
154 volatile unsigned int *a = addr;
155 unsigned long flags;
156
157 a += nr >> 5;
158 mask = 1 << (nr & 0x1f);
159 local_irq_save(flags);
160 retval = (mask & *a) != 0;
161 *a ^= mask;
162 local_irq_restore(flags);
163
164 return retval;
165}
166
167static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
168{
169 int mask, retval;
170 volatile unsigned int *a = addr;
171
172 a += nr >> 5;
173 mask = 1 << (nr & 0x1f);
174 retval = (mask & *a) != 0;
175 *a ^= mask;
176
177 return retval;
178}
179
180static __inline__ int test_bit(int nr, const volatile void *addr)
181{
182 return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
183}
184
185static __inline__ unsigned long ffz(unsigned long word)
186{
187 unsigned long result, __d2, __d3;
188
189 __asm__("gettr tr0, %2\n\t"
190 "pta $+32, tr0\n\t"
191 "andi %1, 1, %3\n\t"
192 "beq %3, r63, tr0\n\t"
193 "pta $+4, tr0\n"
194 "0:\n\t"
195 "shlri.l %1, 1, %1\n\t"
196 "addi %0, 1, %0\n\t"
197 "andi %1, 1, %3\n\t"
198 "beqi %3, 1, tr0\n"
199 "1:\n\t"
200 "ptabs %2, tr0\n\t"
201 : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
202 : "0" (0L), "1" (word));
203
204 return result;
205}
206
207/**
208 * __ffs - find first bit in word
209 * @word: The word to search
210 *
211 * Undefined if no bit exists, so code should check against 0 first.
212 */
213static inline unsigned long __ffs(unsigned long word)
214{
215 int r = 0;
216
217 if (!word)
218 return 0;
219 if (!(word & 0xffff)) {
220 word >>= 16;
221 r += 16;
222 }
223 if (!(word & 0xff)) {
224 word >>= 8;
225 r += 8;
226 }
227 if (!(word & 0xf)) {
228 word >>= 4;
229 r += 4;
230 }
231 if (!(word & 3)) {
232 word >>= 2;
233 r += 2;
234 }
235 if (!(word & 1)) {
236 word >>= 1;
237 r += 1;
238 }
239 return r;
240}
241
242/**
243 * find_next_bit - find the next set bit in a memory region
244 * @addr: The address to base the search on
245 * @offset: The bitnumber to start searching at
246 * @size: The maximum size to search
247 */
248static inline unsigned long find_next_bit(const unsigned long *addr,
249 unsigned long size, unsigned long offset)
250{
251 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
252 unsigned int result = offset & ~31UL;
253 unsigned int tmp;
254
255 if (offset >= size)
256 return size;
257 size -= result;
258 offset &= 31UL;
259 if (offset) {
260 tmp = *p++;
261 tmp &= ~0UL << offset;
262 if (size < 32)
263 goto found_first;
264 if (tmp)
265 goto found_middle;
266 size -= 32;
267 result += 32;
268 }
269 while (size >= 32) {
270 if ((tmp = *p++) != 0)
271 goto found_middle;
272 result += 32;
273 size -= 32;
274 }
275 if (!size)
276 return result;
277 tmp = *p;
278
279found_first:
280 tmp &= ~0UL >> (32 - size);
281 if (tmp == 0UL) /* Are any bits set? */
282 return result + size; /* Nope. */
283found_middle:
284 return result + __ffs(tmp);
285}
286
287/**
288 * find_first_bit - find the first set bit in a memory region
289 * @addr: The address to start the search at
290 * @size: The maximum size to search
291 *
292 * Returns the bit-number of the first set bit, not the number of the byte
293 * containing a bit.
294 */
295#define find_first_bit(addr, size) \
296 find_next_bit((addr), (size), 0)
297
298
299static inline int find_next_zero_bit(void *addr, int size, int offset)
300{
301 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
302 unsigned long result = offset & ~31UL;
303 unsigned long tmp;
304
305 if (offset >= size)
306 return size;
307 size -= result;
308 offset &= 31UL;
309 if (offset) {
310 tmp = *(p++);
311 tmp |= ~0UL >> (32-offset);
312 if (size < 32)
313 goto found_first;
314 if (~tmp)
315 goto found_middle;
316 size -= 32;
317 result += 32;
318 }
319 while (size & ~31UL) {
320 if (~(tmp = *(p++)))
321 goto found_middle;
322 result += 32;
323 size -= 32;
324 }
325 if (!size)
326 return result;
327 tmp = *p;
328
329found_first:
330 tmp |= ~0UL << size;
331found_middle:
332 return result + ffz(tmp);
333}
334
335#define find_first_zero_bit(addr, size) \
336 find_next_zero_bit((addr), (size), 0)
337
338/*
339 * hweightN: returns the hamming weight (i.e. the number
340 * of bits set) of a N-bit word
341 */
342
343#define hweight32(x) generic_hweight32(x)
344#define hweight16(x) generic_hweight16(x)
345#define hweight8(x) generic_hweight8(x)
346
347/*
348 * Every architecture must define this function. It's the fastest
349 * way of searching a 140-bit bitmap where the first 100 bits are
350 * unlikely to be set. It's guaranteed that at least one of the 140
351 * bits is cleared.
352 */
353
354static inline int sched_find_first_bit(unsigned long *b)
355{
356 if (unlikely(b[0]))
357 return __ffs(b[0]);
358 if (unlikely(b[1]))
359 return __ffs(b[1]) + 32;
360 if (unlikely(b[2]))
361 return __ffs(b[2]) + 64;
362 if (b[3])
363 return __ffs(b[3]) + 96;
364 return __ffs(b[4]) + 128;
365}
366
367/*
368 * ffs: find first bit set. This is defined the same way as
369 * the libc and compiler builtin ffs routines, therefore
370 * differs in spirit from the above ffz (man ffs).
371 */
372
373#define ffs(x) generic_ffs(x)
374
375/*
376 * hweightN: returns the hamming weight (i.e. the number
377 * of bits set) of a N-bit word
378 */
379
380#define hweight32(x) generic_hweight32(x)
381#define hweight16(x) generic_hweight16(x)
382#define hweight8(x) generic_hweight8(x)
383
384#ifdef __LITTLE_ENDIAN__
385#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
386#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
387#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
388#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
389#define ext2_find_next_zero_bit(addr, size, offset) \
390 find_next_zero_bit((addr), (size), (offset))
391#else
392static __inline__ int ext2_set_bit(int nr, volatile void * addr)
393{
394 int mask, retval;
395 unsigned long flags;
396 volatile unsigned char *ADDR = (unsigned char *) addr;
397
398 ADDR += nr >> 3;
399 mask = 1 << (nr & 0x07);
400 local_irq_save(flags);
401 retval = (mask & *ADDR) != 0;
402 *ADDR |= mask;
403 local_irq_restore(flags);
404 return retval;
405}
406
407static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
408{
409 int mask, retval;
410 unsigned long flags;
411 volatile unsigned char *ADDR = (unsigned char *) addr;
412
413 ADDR += nr >> 3;
414 mask = 1 << (nr & 0x07);
415 local_irq_save(flags);
416 retval = (mask & *ADDR) != 0;
417 *ADDR &= ~mask;
418 local_irq_restore(flags);
419 return retval;
420}
421
422static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
423{
424 int mask;
425 const volatile unsigned char *ADDR = (const unsigned char *) addr;
426
427 ADDR += nr >> 3;
428 mask = 1 << (nr & 0x07);
429 return ((mask & *ADDR) != 0);
430}
431
432#define ext2_find_first_zero_bit(addr, size) \
433 ext2_find_next_zero_bit((addr), (size), 0)
434
435static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
436{
437 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
438 unsigned long result = offset & ~31UL;
439 unsigned long tmp;
440
441 if (offset >= size)
442 return size;
443 size -= result;
444 offset &= 31UL;
445 if(offset) {
446 /* We hold the little endian value in tmp, but then the
447 * shift is illegal. So we could keep a big endian value
448 * in tmp, like this:
449 *
450 * tmp = __swab32(*(p++));
451 * tmp |= ~0UL >> (32-offset);
452 *
453 * but this would decrease performance, so we change the
454 * shift:
455 */
456 tmp = *(p++);
457 tmp |= __swab32(~0UL >> (32-offset));
458 if(size < 32)
459 goto found_first;
460 if(~tmp)
461 goto found_middle;
462 size -= 32;
463 result += 32;
464 }
465 while(size & ~31UL) {
466 if(~(tmp = *(p++)))
467 goto found_middle;
468 result += 32;
469 size -= 32;
470 }
471 if(!size)
472 return result;
473 tmp = *p;
474
475found_first:
476 /* tmp is little endian, so we would have to swab the shift,
477 * see above. But then we have to swab tmp below for ffz, so
478 * we might as well do this here.
479 */
480 return result + ffz(__swab32(tmp) | (~0UL << size));
481found_middle:
482 return result + ffz(__swab32(tmp));
483}
484#endif
485
486#define ext2_set_bit_atomic(lock, nr, addr) \
487 ({ \
488 int ret; \
489 spin_lock(lock); \
490 ret = ext2_set_bit((nr), (addr)); \
491 spin_unlock(lock); \
492 ret; \
493 })
494
495#define ext2_clear_bit_atomic(lock, nr, addr) \
496 ({ \
497 int ret; \
498 spin_lock(lock); \
499 ret = ext2_clear_bit((nr), (addr)); \
500 spin_unlock(lock); \
501 ret; \
502 })
503
504/* Bitmap functions for the minix filesystem. */
505#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
506#define minix_set_bit(nr,addr) set_bit(nr,addr)
507#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
508#define minix_test_bit(nr,addr) test_bit(nr,addr)
509#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
510
511#define ffs(x) generic_ffs(x)
512#define fls(x) generic_fls(x)
513
514#endif /* __KERNEL__ */
515
516#endif /* __ASM_SH64_BITOPS_H */
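The find_*_bit() and test_and_set_bit() helpers above are the building blocks of simple bitmap allocators. A hypothetical sketch (all names invented) of claiming a free slot in a small bitmap:

#define EXAMPLE_NR_SLOTS 64

static unsigned long example_map[EXAMPLE_NR_SLOTS / 32];

static int example_alloc_slot(void)
{
        int slot;

        do {
                slot = find_first_zero_bit(example_map, EXAMPLE_NR_SLOTS);
                if (slot >= EXAMPLE_NR_SLOTS)
                        return -1;      /* bitmap full */
                /* Retry if the bit got set between the search and the claim;
                 * test_and_set_bit() returns the bit's previous value. */
        } while (test_and_set_bit(slot, example_map));

        return slot;
}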
diff --git a/include/asm-sh64/bug.h b/include/asm-sh64/bug.h
new file mode 100644
index 000000000000..3acd54d59566
--- /dev/null
+++ b/include/asm-sh64/bug.h
@@ -0,0 +1,32 @@
1#ifndef __ASM_SH64_BUG_H
2#define __ASM_SH64_BUG_H
3
4#include <linux/config.h>
5
6/*
7 * Tell the user there is some problem, then force a segfault (in process
8 * context) or a panic (interrupt context).
9 */
10#define BUG() do { \
11 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
12 *(volatile int *)0 = 0; \
13} while (0)
14
15#define BUG_ON(condition) do { \
16 if (unlikely((condition)!=0)) \
17 BUG(); \
18} while(0)
19
20#define PAGE_BUG(page) do { \
21 BUG(); \
22} while (0)
23
24#define WARN_ON(condition) do { \
25 if (unlikely((condition)!=0)) { \
26 printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
27 dump_stack(); \
28 } \
29} while (0)
30
31#endif /* __ASM_SH64_BUG_H */
32
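A hypothetical sketch of how these checks are typically used (function and variable names invented): BUG_ON() for hard invariants, WARN_ON() for non-fatal sanity checks.

static void example_consume(void *item)
{
        BUG_ON(item == NULL);   /* a NULL here would be a kernel bug */

        /* Non-fatal check: prints a message and a backtrace, then continues. */
        WARN_ON(((unsigned long)item & 3) != 0);

        /* ... use item ... */
}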
diff --git a/include/asm-sh64/bugs.h b/include/asm-sh64/bugs.h
new file mode 100644
index 000000000000..05554aaea672
--- /dev/null
+++ b/include/asm-sh64/bugs.h
@@ -0,0 +1,38 @@
1#ifndef __ASM_SH64_BUGS_H
2#define __ASM_SH64_BUGS_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/bugs.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 */
15
16/*
17 * This is included by init/main.c to check for architecture-dependent bugs.
18 *
19 * Needs:
20 * void check_bugs(void);
21 */
22
23/*
24 * I don't know of any Super-H bugs yet.
25 */
26
27#include <asm/processor.h>
28
29static void __init check_bugs(void)
30{
31 extern char *get_cpu_subtype(void);
32 extern unsigned long loops_per_jiffy;
33
34 cpu_data->loops_per_jiffy = loops_per_jiffy;
35
36 printk("CPU: %s\n", get_cpu_subtype());
37}
38#endif /* __ASM_SH64_BUGS_H */
diff --git a/include/asm-sh64/byteorder.h b/include/asm-sh64/byteorder.h
new file mode 100644
index 000000000000..f602ebe334eb
--- /dev/null
+++ b/include/asm-sh64/byteorder.h
@@ -0,0 +1,49 @@
1#ifndef __ASM_SH64_BYTEORDER_H
2#define __ASM_SH64_BYTEORDER_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/byteorder.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#include <asm/types.h>
16
17static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
18{
19 __asm__("byterev %0, %0\n\t"
20 "shari %0, 32, %0"
21 : "=r" (x)
22 : "0" (x));
23 return x;
24}
25
26static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
27{
28 __asm__("byterev %0, %0\n\t"
29 "shari %0, 48, %0"
30 : "=r" (x)
31 : "0" (x));
32 return x;
33}
34
35#define __arch__swab32(x) ___arch__swab32(x)
36#define __arch__swab16(x) ___arch__swab16(x)
37
38#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
39# define __BYTEORDER_HAS_U64__
40# define __SWAB_64_THRU_32__
41#endif
42
43#ifdef __LITTLE_ENDIAN__
44#include <linux/byteorder/little_endian.h>
45#else
46#include <linux/byteorder/big_endian.h>
47#endif
48
49#endif /* __ASM_SH64_BYTEORDER_H */
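The inline assembly above uses byterev, which reverses the byte order of the full 64-bit register, and shari, which shifts the reversed value back down so only the swapped low 32 (or 16) bits remain. A plain-C sketch of what ___arch__swab32() computes (illustrative only, not from this commit):

static inline unsigned int example_swab32(unsigned int x)
{
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
}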
diff --git a/include/asm-sh64/cache.h b/include/asm-sh64/cache.h
new file mode 100644
index 000000000000..f54e85e8a470
--- /dev/null
+++ b/include/asm-sh64/cache.h
@@ -0,0 +1,141 @@
1#ifndef __ASM_SH64_CACHE_H
2#define __ASM_SH64_CACHE_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/cache.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003, 2004 Paul Mundt
13 *
14 */
15#include <asm/cacheflush.h>
16
17#define L1_CACHE_SHIFT 5
18/* bytes per L1 cache line */
19#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
20#define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1))
21#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK)
22#define L1_CACHE_SIZE_BYTES (L1_CACHE_BYTES << 10)
23/* Largest L1 which this arch supports */
24#define L1_CACHE_SHIFT_MAX 5
25
26#ifdef MODULE
27#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
28#else
29#define __cacheline_aligned \
30 __attribute__((__aligned__(L1_CACHE_BYTES), \
31 __section__(".data.cacheline_aligned")))
32#endif
33
34/*
35 * Control Registers.
36 */
37#define ICCR_BASE 0x01600000 /* Instruction Cache Control Register */
38#define ICCR_REG0 0 /* Register 0 offset */
39#define ICCR_REG1 1 /* Register 1 offset */
40#define ICCR0 ICCR_BASE+ICCR_REG0
41#define ICCR1 ICCR_BASE+ICCR_REG1
42
43#define ICCR0_OFF 0x0 /* Set ICACHE off */
44#define ICCR0_ON 0x1 /* Set ICACHE on */
45#define ICCR0_ICI 0x2 /* Invalidate all in IC */
46
47#define ICCR1_NOLOCK 0x0 /* Set No Locking */
48
49#define OCCR_BASE 0x01E00000 /* Operand Cache Control Register */
50#define OCCR_REG0 0 /* Register 0 offset */
51#define OCCR_REG1 1 /* Register 1 offset */
52#define OCCR0 OCCR_BASE+OCCR_REG0
53#define OCCR1 OCCR_BASE+OCCR_REG1
54
55#define OCCR0_OFF 0x0 /* Set OCACHE off */
56#define OCCR0_ON 0x1 /* Set OCACHE on */
57#define OCCR0_OCI 0x2 /* Invalidate all in OC */
58#define OCCR0_WT 0x4 /* Set OCACHE in WT Mode */
59#define OCCR0_WB 0x0 /* Set OCACHE in WB Mode */
60
61#define OCCR1_NOLOCK 0x0 /* Set No Locking */
62
63
64/*
65 * SH-5
66 * A bit of description here, for neff=32.
67 *
68 * |<--- tag (19 bits) --->|
69 * +-----------------------------+-----------------+------+----------+------+
70 * | | | ways |set index |offset|
71 * +-----------------------------+-----------------+------+----------+------+
72 * ^ 2 bits 8 bits 5 bits
73 * +- Bit 31
74 *
75 * Cacheline size is based on offset: 5 bits = 32 bytes per line
76 * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
77 * have a broader space for registers. These are outlined by
78 * CACHE_?C_*_STEP below.
79 *
80 */
81
82/* Valid and Dirty bits */
83#define SH_CACHE_VALID (1LL<<0)
84#define SH_CACHE_UPDATED (1LL<<57)
85
86/* Cache flags */
87#define SH_CACHE_MODE_WT (1LL<<0)
88#define SH_CACHE_MODE_WB (1LL<<1)
89
90#ifndef __ASSEMBLY__
91
92/*
93 * Cache information structure.
94 *
95 * Defined for both I and D cache, per-processor.
96 */
97struct cache_info {
98 unsigned int ways;
99 unsigned int sets;
100 unsigned int linesz;
101
102 unsigned int way_shift;
103 unsigned int entry_shift;
104 unsigned int set_shift;
105 unsigned int way_step_shift;
106 unsigned int asid_shift;
107
108 unsigned int way_ofs;
109
110 unsigned int asid_mask;
111 unsigned int idx_mask;
112 unsigned int epn_mask;
113
114 unsigned long flags;
115};
116
117#endif /* __ASSEMBLY__ */
118
119/* Instruction cache */
120#define CACHE_IC_ADDRESS_ARRAY 0x01000000
121
122/* Operand Cache */
123#define CACHE_OC_ADDRESS_ARRAY 0x01800000
124
125/* These declarations relate to cache 'synonyms' in the operand cache. A
126 'synonym' occurs where effective address bits overlap between those used for
127 indexing the cache sets and those passed to the MMU for translation. In the
128 case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */
129
130#define CACHE_OC_N_SYNBITS 1 /* Number of synonym bits */
131#define CACHE_OC_SYN_SHIFT 12
132/* Mask to select synonym bit(s) */
133#define CACHE_OC_SYN_MASK (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
134
135
136/*
137 * Instruction cache can't be invalidated based on physical addresses.
138 * No Instruction Cache defines required, then.
139 */
140
141#endif /* __ASM_SH64_CACHE_H */
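With L1_CACHE_SHIFT of 5, L1_CACHE_BYTES is 32 and L1_CACHE_ALIGN() rounds a value up to the next 32-byte line boundary. A quick worked example (values chosen for illustration):

    L1_CACHE_ALIGN(0x1001) = (0x1001 + 31) & ~31 = 0x1020
    L1_CACHE_ALIGN(0x1020) = (0x1020 + 31) & ~31 = 0x1020   /* already aligned */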
diff --git a/include/asm-sh64/cacheflush.h b/include/asm-sh64/cacheflush.h
new file mode 100644
index 000000000000..55f71aa0aa6b
--- /dev/null
+++ b/include/asm-sh64/cacheflush.h
@@ -0,0 +1,48 @@
1#ifndef __ASM_SH64_CACHEFLUSH_H
2#define __ASM_SH64_CACHEFLUSH_H
3
4#ifndef __ASSEMBLY__
5
6#include <asm/page.h>
7
8struct vm_area_struct;
9struct page;
10struct mm_struct;
11
12extern void flush_cache_all(void);
13extern void flush_cache_mm(struct mm_struct *mm);
14extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
15extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
16 unsigned long end);
17extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
18extern void flush_dcache_page(struct page *pg);
19extern void flush_icache_range(unsigned long start, unsigned long end);
20extern void flush_icache_user_range(struct vm_area_struct *vma,
21 struct page *page, unsigned long addr,
22 int len);
23
24#define flush_dcache_mmap_lock(mapping) do { } while (0)
25#define flush_dcache_mmap_unlock(mapping) do { } while (0)
26
27#define flush_cache_vmap(start, end) flush_cache_all()
28#define flush_cache_vunmap(start, end) flush_cache_all()
29
30#define flush_icache_page(vma, page) do { } while (0)
31
32#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
33 do { \
34 flush_cache_page(vma, vaddr, page_to_pfn(page));\
35 memcpy(dst, src, len); \
36 flush_icache_user_range(vma, page, vaddr, len); \
37 } while (0)
38
39#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
40 do { \
41 flush_cache_page(vma, vaddr, page_to_pfn(page));\
42 memcpy(dst, src, len); \
43 } while (0)
44
45#endif /* __ASSEMBLY__ */
46
47#endif /* __ASM_SH64_CACHEFLUSH_H */
48
diff --git a/include/asm-sh64/cayman.h b/include/asm-sh64/cayman.h
new file mode 100644
index 000000000000..7b6b96844842
--- /dev/null
+++ b/include/asm-sh64/cayman.h
@@ -0,0 +1,20 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * include/asm-sh64/cayman.h
7 *
8 * Cayman definitions
9 *
10 * Global definitions for the SH5 Cayman board
11 *
12 * Copyright (C) 2002 Stuart Menefy
13 */
14
15
16/* Setup for the SMSC FDC37C935 / LAN91C100FD */
17#define SMSC_IRQ IRQ_IRL1
18
19/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */
20#define PCI2_IRQ IRQ_IRL3
diff --git a/include/asm-sh64/checksum.h b/include/asm-sh64/checksum.h
new file mode 100644
index 000000000000..aa3911a99490
--- /dev/null
+++ b/include/asm-sh64/checksum.h
@@ -0,0 +1,95 @@
1#ifndef __ASM_SH64_CHECKSUM_H
2#define __ASM_SH64_CHECKSUM_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/checksum.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#include <asm/registers.h>
16
17/*
18 * computes the checksum of a memory block at buff, length len,
19 * and adds in "sum" (32-bit)
20 *
21 * returns a 32-bit number suitable for feeding into itself
22 * or csum_tcpudp_magic
23 *
24 * this function must be called with even lengths, except
25 * for the last fragment, which may be odd
26 *
27 * it's best to have buff aligned on a 32-bit boundary
28 */
29asmlinkage unsigned int csum_partial(const unsigned char *buff, int len,
30 unsigned int sum);
31
32/*
33 * Note: when you get a NULL pointer exception here this means someone
34 * passed in an incorrect kernel address to one of these functions.
35 *
36 * If you use these functions directly please don't forget the
37 * verify_area().
38 */
39
40
41unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len,
42 unsigned int sum);
43
44unsigned int csum_partial_copy_from_user(const char *src, char *dst,
45 int len, int sum, int *err_ptr);
46
47/*
48 * These are the old (and unsafe) way of doing checksums; a warning message will be
49 * printed if they are used and an exception occurs.
50 *
51 * these functions should go away after some time.
52 */
53
54#define csum_partial_copy_fromuser csum_partial_copy
55
56unsigned int csum_partial_copy(const char *src, char *dst, int len,
57 unsigned int sum);
58
59static inline unsigned short csum_fold(unsigned int sum)
60{
61 sum = (sum & 0xffff) + (sum >> 16);
62 sum = (sum & 0xffff) + (sum >> 16);
63 return ~(sum);
64}
65
66unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
67
68unsigned long csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
69 unsigned short len, unsigned short proto,
70 unsigned int sum);
71
72/*
73 * computes the checksum of the TCP/UDP pseudo-header
74 * returns a 16-bit checksum, already complemented
75 */
76static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
77 unsigned long daddr,
78 unsigned short len,
79 unsigned short proto,
80 unsigned int sum)
81{
82 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
83}
84
85/*
86 * this routine is used for miscellaneous IP-like checksums, mainly
87 * in icmp.c
88 */
89static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
90{
91 return csum_fold(csum_partial(buff, len, 0));
92}
93
94#endif /* __ASM_SH64_CHECKSUM_H */
95
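csum_fold() above collapses a 32-bit partial sum into the final 16-bit Internet checksum by adding the two halves twice (the second pass absorbs any carry) and then complementing. A worked example with an assumed partial sum:

    sum = 0x2509f1ca
    fold 1:  0xf1ca + 0x2509 = 0x000116d3
    fold 2:  0x16d3 + 0x0001 = 0x000016d4
    ~0x16d4 (low 16 bits)    = 0xe92b    /* value returned */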
diff --git a/include/asm-sh64/cpumask.h b/include/asm-sh64/cpumask.h
new file mode 100644
index 000000000000..b7b105dbedaf
--- /dev/null
+++ b/include/asm-sh64/cpumask.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_CPUMASK_H
2#define __ASM_SH64_CPUMASK_H
3
4#include <asm-generic/cpumask.h>
5
6#endif /* __ASM_SH64_CPUMASK_H */
diff --git a/include/asm-sh64/cputime.h b/include/asm-sh64/cputime.h
new file mode 100644
index 000000000000..0fd89da2aa86
--- /dev/null
+++ b/include/asm-sh64/cputime.h
@@ -0,0 +1,6 @@
1#ifndef __SH64_CPUTIME_H
2#define __SH64_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __SH64_CPUTIME_H */
diff --git a/include/asm-sh64/current.h b/include/asm-sh64/current.h
new file mode 100644
index 000000000000..261224339d6f
--- /dev/null
+++ b/include/asm-sh64/current.h
@@ -0,0 +1,28 @@
1#ifndef __ASM_SH64_CURRENT_H
2#define __ASM_SH64_CURRENT_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/current.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 */
15
16#include <linux/thread_info.h>
17
18struct task_struct;
19
20static __inline__ struct task_struct * get_current(void)
21{
22 return current_thread_info()->task;
23}
24
25#define current get_current()
26
27#endif /* __ASM_SH64_CURRENT_H */
28
diff --git a/include/asm-sh64/delay.h b/include/asm-sh64/delay.h
new file mode 100644
index 000000000000..6ae31301a16a
--- /dev/null
+++ b/include/asm-sh64/delay.h
@@ -0,0 +1,11 @@
1#ifndef __ASM_SH64_DELAY_H
2#define __ASM_SH64_DELAY_H
3
4extern void __delay(int loops);
5extern void __udelay(unsigned long long usecs, unsigned long lpj);
6extern void __ndelay(unsigned long long nsecs, unsigned long lpj);
7extern void udelay(unsigned long usecs);
8extern void ndelay(unsigned long nsecs);
9
10#endif /* __ASM_SH64_DELAY_H */
11
diff --git a/include/asm-sh64/div64.h b/include/asm-sh64/div64.h
new file mode 100644
index 000000000000..f75869565e2e
--- /dev/null
+++ b/include/asm-sh64/div64.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_DIV64_H
2#define __ASM_SH64_DIV64_H
3
4#include <asm-generic/div64.h>
5
6#endif /* __ASM_SH64_DIV64_H */
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
new file mode 100644
index 000000000000..b8d26fe677f4
--- /dev/null
+++ b/include/asm-sh64/dma-mapping.h
@@ -0,0 +1,162 @@
1#ifndef __ASM_SH_DMA_MAPPING_H
2#define __ASM_SH_DMA_MAPPING_H
3
4#include <linux/config.h>
5#include <linux/mm.h>
6#include <asm/scatterlist.h>
7#include <asm/io.h>
8
9struct pci_dev;
10extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
11 dma_addr_t *dma_handle);
12extern void consistent_free(struct pci_dev *hwdev, size_t size,
13 void *vaddr, dma_addr_t dma_handle);
14
15#define dma_supported(dev, mask) (1)
16
17static inline int dma_set_mask(struct device *dev, u64 mask)
18{
19 if (!dev->dma_mask || !dma_supported(dev, mask))
20 return -EIO;
21
22 *dev->dma_mask = mask;
23
24 return 0;
25}
26
27static inline void *dma_alloc_coherent(struct device *dev, size_t size,
28 dma_addr_t *dma_handle, int flag)
29{
30 return consistent_alloc(NULL, size, dma_handle);
31}
32
33static inline void dma_free_coherent(struct device *dev, size_t size,
34 void *vaddr, dma_addr_t dma_handle)
35{
36 consistent_free(NULL, size, vaddr, dma_handle);
37}
38
39static inline void dma_cache_sync(void *vaddr, size_t size,
40 enum dma_data_direction dir)
41{
42 dma_cache_wback_inv((unsigned long)vaddr, size);
43}
44
45static inline dma_addr_t dma_map_single(struct device *dev,
46 void *ptr, size_t size,
47 enum dma_data_direction dir)
48{
49#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
50 if (dev->bus == &pci_bus_type)
51 return virt_to_bus(ptr);
52#endif
53 dma_cache_sync(ptr, size, dir);
54
55 return virt_to_bus(ptr);
56}
57
58#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
59
60static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
61 int nents, enum dma_data_direction dir)
62{
63 int i;
64
65 for (i = 0; i < nents; i++) {
66#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
67 dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
68 sg[i].length, dir);
69#endif
70 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
71 }
72
73 return nents;
74}
75
76#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
77
78static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
79 unsigned long offset, size_t size,
80 enum dma_data_direction dir)
81{
82 return dma_map_single(dev, page_address(page) + offset, size, dir);
83}
84
85static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
86 size_t size, enum dma_data_direction dir)
87{
88 dma_unmap_single(dev, dma_address, size, dir);
89}
90
91static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
92 size_t size, enum dma_data_direction dir)
93{
94#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
95 if (dev->bus == &pci_bus_type)
96 return;
97#endif
98 dma_cache_sync(bus_to_virt(dma_handle), size, dir);
99}
100
101static inline void dma_sync_single_range(struct device *dev,
102 dma_addr_t dma_handle,
103 unsigned long offset, size_t size,
104 enum dma_data_direction dir)
105{
106#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
107 if (dev->bus == &pci_bus_type)
108 return;
109#endif
110 dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
111}
112
113static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
114 int nelems, enum dma_data_direction dir)
115{
116 int i;
117
118 for (i = 0; i < nelems; i++) {
119#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
120 dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
121 sg[i].length, dir);
122#endif
123 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
124 }
125}
126
127static inline void dma_sync_single_for_cpu(struct device *dev,
128 dma_addr_t dma_handle, size_t size,
129 enum dma_data_direction dir)
130 __attribute__ ((alias("dma_sync_single")));
131
132static inline void dma_sync_single_for_device(struct device *dev,
133 dma_addr_t dma_handle, size_t size,
134 enum dma_data_direction dir)
135 __attribute__ ((alias("dma_sync_single")));
136
137static inline void dma_sync_sg_for_cpu(struct device *dev,
138 struct scatterlist *sg, int nelems,
139 enum dma_data_direction dir)
140 __attribute__ ((alias("dma_sync_sg")));
141
142static inline void dma_sync_sg_for_device(struct device *dev,
143 struct scatterlist *sg, int nelems,
144 enum dma_data_direction dir)
145 __attribute__ ((alias("dma_sync_sg")));
146
147static inline int dma_get_cache_alignment(void)
148{
149 /*
150 * Each processor family will define its own L1_CACHE_SHIFT,
151 * L1_CACHE_BYTES wraps to this, so this is always safe.
152 */
153 return L1_CACHE_BYTES;
154}
155
156static inline int dma_mapping_error(dma_addr_t dma_addr)
157{
158 return dma_addr == 0;
159}
160
161#endif /* __ASM_SH_DMA_MAPPING_H */
162
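A hypothetical sketch of the streaming-DMA pattern these inlines implement (function and buffer names invented): map a kernel buffer for a device read, hand the bus address to the hardware, then tear the mapping down.

static int example_dma_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t bus;

        /* Syncs the cache (unless the coherent PCI path applies) and
         * returns the bus address the device should be programmed with. */
        bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bus))
                return -1;

        /* ... hand 'bus' to the device and wait for the transfer ... */

        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
        return 0;
}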
diff --git a/include/asm-sh64/dma.h b/include/asm-sh64/dma.h
new file mode 100644
index 000000000000..e701f39470a2
--- /dev/null
+++ b/include/asm-sh64/dma.h
@@ -0,0 +1,41 @@
1#ifndef __ASM_SH64_DMA_H
2#define __ASM_SH64_DMA_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/dma.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 */
15
16#include <linux/mm.h>
17#include <asm/io.h>
18#include <asm/pgtable.h>
19
20#define MAX_DMA_CHANNELS 4
21
22/*
23 * SH5 can DMA in any memory area.
24 *
25 * The static definition is dodgy because it should limit
26 * the highest DMA-able address based on the actual
27 * Physical memory available. This is actually performed
28 * at run time in defining the memory allowed to DMA_ZONE.
29 */
30#define MAX_DMA_ADDRESS ~(NPHYS_MASK)
31
32#define DMA_MODE_READ 0
33#define DMA_MODE_WRITE 1
34
35#ifdef CONFIG_PCI
36extern int isa_dma_bridge_buggy;
37#else
38#define isa_dma_bridge_buggy (0)
39#endif
40
41#endif /* __ASM_SH64_DMA_H */
diff --git a/include/asm-sh64/elf.h b/include/asm-sh64/elf.h
new file mode 100644
index 000000000000..f994286e1998
--- /dev/null
+++ b/include/asm-sh64/elf.h
@@ -0,0 +1,107 @@
1#ifndef __ASM_SH64_ELF_H
2#define __ASM_SH64_ELF_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/elf.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15/*
16 * ELF register definitions..
17 */
18
19#include <asm/ptrace.h>
20#include <asm/user.h>
21#include <asm/byteorder.h>
22
23typedef unsigned long elf_greg_t;
24
25#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
26typedef elf_greg_t elf_gregset_t[ELF_NGREG];
27
28typedef struct user_fpu_struct elf_fpregset_t;
29
30/*
31 * This is used to ensure we don't load something for the wrong architecture.
32 */
33#define elf_check_arch(x) ( (x)->e_machine == EM_SH )
34
35/*
36 * These are used to set parameters in the core dumps.
37 */
38#define ELF_CLASS ELFCLASS32
39#ifdef __LITTLE_ENDIAN__
40#define ELF_DATA ELFDATA2LSB
41#else
42#define ELF_DATA ELFDATA2MSB
43#endif
44#define ELF_ARCH EM_SH
45
46#define USE_ELF_CORE_DUMP
47#define ELF_EXEC_PAGESIZE 4096
48
49/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
50 use of this is to invoke "./ld.so someprog" to test out a new version of
51 the loader. We need to make sure that it is out of the way of the program
52 that it will "exec", and that there is sufficient room for the brk. */
53
54#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
55
56#define R_SH_DIR32 1
57#define R_SH_REL32 2
58#define R_SH_IMM_LOW16 246
59#define R_SH_IMM_LOW16_PCREL 247
60#define R_SH_IMM_MEDLOW16 248
61#define R_SH_IMM_MEDLOW16_PCREL 249
62
63#define ELF_CORE_COPY_REGS(_dest,_regs) \
64 memcpy((char *) &_dest, (char *) _regs, \
65 sizeof(struct pt_regs));
66
67/* This yields a mask that user programs can use to figure out what
68 instruction set this CPU supports. This could be done in user space,
69 but it's not easy, and we've already done it here. */
70
71#define ELF_HWCAP (0)
72
73/* This yields a string that ld.so will use to load implementation
74 specific libraries for optimization. This is more specific in
75 intent than poking at uname or /proc/cpuinfo.
76
77 For the moment, we have only optimizations for the Intel generations,
78 but that could change... */
79
80#define ELF_PLATFORM (NULL)
81
82#define ELF_PLAT_INIT(_r, load_addr) \
83 do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
84 _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
85 _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
86 _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
87 _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
88 _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
89 _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
90 _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
91 _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
92 _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
93 _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
94 _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
95 _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
96 _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
97 _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
98 _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
99 _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
100 _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
101 _r->sr = SR_FD | SR_MMU; } while (0)
102
103#ifdef __KERNEL__
104#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
105#endif
106
107#endif /* __ASM_SH64_ELF_H */
diff --git a/include/asm-sh64/errno.h b/include/asm-sh64/errno.h
new file mode 100644
index 000000000000..57b46d4bdd41
--- /dev/null
+++ b/include/asm-sh64/errno.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_ERRNO_H
2#define __ASM_SH64_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif /* __ASM_SH64_ERRNO_H */
diff --git a/include/asm-sh64/fcntl.h b/include/asm-sh64/fcntl.h
new file mode 100644
index 000000000000..ffcc36c64fa5
--- /dev/null
+++ b/include/asm-sh64/fcntl.h
@@ -0,0 +1,7 @@
1#ifndef __ASM_SH64_FCNTL_H
2#define __ASM_SH64_FCNTL_H
3
4#include <asm-sh/fcntl.h>
5
6#endif /* __ASM_SH64_FCNTL_H */
7
diff --git a/include/asm-sh64/hardirq.h b/include/asm-sh64/hardirq.h
new file mode 100644
index 000000000000..ad2330e41fd5
--- /dev/null
+++ b/include/asm-sh64/hardirq.h
@@ -0,0 +1,19 @@
1#ifndef __ASM_SH64_HARDIRQ_H
2#define __ASM_SH64_HARDIRQ_H
3
4#include <linux/config.h>
5#include <linux/threads.h>
6#include <linux/irq.h>
7
8/* entry.S is sensitive to the offsets of these fields */
9typedef struct {
10 unsigned int __softirq_pending;
11} ____cacheline_aligned irq_cpustat_t;
12
13#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
14
15/* arch/sh64/kernel/irq.c */
16extern void ack_bad_irq(unsigned int irq);
17
18#endif /* __ASM_SH64_HARDIRQ_H */
19
diff --git a/include/asm-sh64/hardware.h b/include/asm-sh64/hardware.h
new file mode 100644
index 000000000000..931c1ad80847
--- /dev/null
+++ b/include/asm-sh64/hardware.h
@@ -0,0 +1,22 @@
1#ifndef __ASM_SH64_HARDWARE_H
2#define __ASM_SH64_HARDWARE_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/hardware.h
10 *
11 * Copyright (C) 2002 Stuart Menefy
12 * Copyright (C) 2003 Paul Mundt
13 *
14 * Definitions of the locations of registers in the physical address space.
15 */
16
17#define PHYS_PERIPHERAL_BLOCK 0x09000000
18#define PHYS_DMAC_BLOCK 0x0e000000
19#define PHYS_PCI_BLOCK 0x60000000
20#define PHYS_EMI_BLOCK 0xff000000
21
22#endif /* __ASM_SH64_HARDWARE_H */
diff --git a/include/asm-sh64/hdreg.h b/include/asm-sh64/hdreg.h
new file mode 100644
index 000000000000..52d983635a27
--- /dev/null
+++ b/include/asm-sh64/hdreg.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_HDREG_H
2#define __ASM_SH64_HDREG_H
3
4#include <asm-generic/hdreg.h>
5
6#endif /* __ASM_SH64_HDREG_H */
diff --git a/include/asm-sh64/hw_irq.h b/include/asm-sh64/hw_irq.h
new file mode 100644
index 000000000000..ae718d1f2d6c
--- /dev/null
+++ b/include/asm-sh64/hw_irq.h
@@ -0,0 +1,16 @@
1#ifndef __ASM_SH64_HW_IRQ_H
2#define __ASM_SH64_HW_IRQ_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/hw_irq.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14static __inline__ void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { /* Nothing to do */ }
15
16#endif /* __ASM_SH64_HW_IRQ_H */
diff --git a/include/asm-sh64/ide.h b/include/asm-sh64/ide.h
new file mode 100644
index 000000000000..6fd514daa1ba
--- /dev/null
+++ b/include/asm-sh64/ide.h
@@ -0,0 +1,35 @@
1/*
2 * linux/include/asm-sh64/ide.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 *
6 * sh64 version by Richard Curnow & Paul Mundt
7 */
8
9/*
10 * This file contains the sh64 architecture specific IDE code.
11 */
12
13#ifndef __ASM_SH64_IDE_H
14#define __ASM_SH64_IDE_H
15
16#ifdef __KERNEL__
17
18#include <linux/config.h>
19
20#ifndef MAX_HWIFS
21#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS
22#endif
23
24/* Without this, the initialisation of PCI IDE cards ends up calling
25 * ide_init_hwif_ports, which won't work. */
26#ifdef CONFIG_BLK_DEV_IDEPCI
27#define IDE_ARCH_OBSOLETE_INIT 1
28#define ide_default_io_ctl(base) (0)
29#endif
30
31#include <asm-generic/ide_iops.h>
32
33#endif /* __KERNEL__ */
34
35#endif /* __ASM_SH64_IDE_H */
diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h
new file mode 100644
index 000000000000..cfafaa73b2b0
--- /dev/null
+++ b/include/asm-sh64/io.h
@@ -0,0 +1,250 @@
1#ifndef __ASM_SH64_IO_H
2#define __ASM_SH64_IO_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/io.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 */
15
16/*
17 * Convention:
18 * read{b,w,l}/write{b,w,l} are for PCI,
19 * while in{b,w,l}/out{b,w,l} are for ISA
21 * These may (will) be platform-specific functions.
21 *
22 * In addition, we have
23 * ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH specific I/O.
24 * which are processor specific. Address should be the result of
25 * onchip_remap();
26 */
27
28#include <linux/compiler.h>
29#include <asm/cache.h>
30#include <asm/system.h>
31#include <asm/page.h>
32#include <asm-generic/iomap.h>
33
34#define virt_to_bus virt_to_phys
35#define bus_to_virt phys_to_virt
36#define page_to_bus page_to_phys
37
38/*
39 * Nothing overly special here.. instead of doing the same thing
40 * over and over again, we just define a set of sh64_in/out functions
41 * with an implicit size. The traditional read{b,w,l}/write{b,w,l}
42 * mess is wrapped to this, as are the SH-specific ctrl_in/out routines.
43 */
44static inline unsigned char sh64_in8(const volatile void __iomem *addr)
45{
46 return *(volatile unsigned char __force *)addr;
47}
48
49static inline unsigned short sh64_in16(const volatile void __iomem *addr)
50{
51 return *(volatile unsigned short __force *)addr;
52}
53
54static inline unsigned int sh64_in32(const volatile void __iomem *addr)
55{
56 return *(volatile unsigned int __force *)addr;
57}
58
59static inline unsigned long long sh64_in64(const volatile void __iomem *addr)
60{
61 return *(volatile unsigned long long __force *)addr;
62}
63
64static inline void sh64_out8(unsigned char b, volatile void __iomem *addr)
65{
66 *(volatile unsigned char __force *)addr = b;
67 wmb();
68}
69
70static inline void sh64_out16(unsigned short b, volatile void __iomem *addr)
71{
72 *(volatile unsigned short __force *)addr = b;
73 wmb();
74}
75
76static inline void sh64_out32(unsigned int b, volatile void __iomem *addr)
77{
78 *(volatile unsigned int __force *)addr = b;
79 wmb();
80}
81
82static inline void sh64_out64(unsigned long long b, volatile void __iomem *addr)
83{
84 *(volatile unsigned long long __force *)addr = b;
85 wmb();
86}
87
88#define readb(addr) sh64_in8(addr)
89#define readw(addr) sh64_in16(addr)
90#define readl(addr) sh64_in32(addr)
91#define readb_relaxed(addr) sh64_in8(addr)
92#define readw_relaxed(addr) sh64_in16(addr)
93#define readl_relaxed(addr) sh64_in32(addr)
94
95#define writeb(b, addr) sh64_out8(b, addr)
96#define writew(b, addr) sh64_out16(b, addr)
97#define writel(b, addr) sh64_out32(b, addr)
98
99#define ctrl_inb(addr) sh64_in8(ioport_map(addr, 1))
100#define ctrl_inw(addr) sh64_in16(ioport_map(addr, 2))
101#define ctrl_inl(addr) sh64_in32(ioport_map(addr, 4))
102
103#define ctrl_outb(b, addr) sh64_out8(b, ioport_map(addr, 1))
104#define ctrl_outw(b, addr) sh64_out16(b, ioport_map(addr, 2))
105#define ctrl_outl(b, addr) sh64_out32(b, ioport_map(addr, 4))
106
107#define ioread8(addr) sh64_in8(addr)
108#define ioread16(addr) sh64_in16(addr)
109#define ioread32(addr) sh64_in32(addr)
110#define iowrite8(b, addr) sh64_out8(b, addr)
111#define iowrite16(b, addr) sh64_out16(b, addr)
112#define iowrite32(b, addr) sh64_out32(b, addr)
113
114#define inb(addr) ctrl_inb(addr)
115#define inw(addr) ctrl_inw(addr)
116#define inl(addr) ctrl_inl(addr)
117#define outb(b, addr) ctrl_outb(b, addr)
118#define outw(b, addr) ctrl_outw(b, addr)
119#define outl(b, addr) ctrl_outl(b, addr)
120
121void outsw(unsigned long port, const void *addr, unsigned long count);
122void insw(unsigned long port, void *addr, unsigned long count);
123void outsl(unsigned long port, const void *addr, unsigned long count);
124void insl(unsigned long port, void *addr, unsigned long count);
125
126void memcpy_toio(void __iomem *to, const void *from, long count);
127void memcpy_fromio(void *to, void __iomem *from, long count);
128
129#define mmiowb()
130
131#ifdef __KERNEL__
132
133#ifdef CONFIG_SH_CAYMAN
134extern unsigned long smsc_superio_virt;
135#endif
136#ifdef CONFIG_PCI
137extern unsigned long pciio_virt;
138#endif
139
140#define IO_SPACE_LIMIT 0xffffffff
141
142/*
143 * Change virtual addresses to physical addresses and vv.
144 * These are trivial on the 1:1 Linux/SuperH mapping
145 */
146extern __inline__ unsigned long virt_to_phys(volatile void * address)
147{
148 return __pa(address);
149}
150
151extern __inline__ void * phys_to_virt(unsigned long address)
152{
153 return __va(address);
154}
155
156extern void * __ioremap(unsigned long phys_addr, unsigned long size,
157 unsigned long flags);
158
159extern __inline__ void * ioremap(unsigned long phys_addr, unsigned long size)
160{
161 return __ioremap(phys_addr, size, 1);
162}
163
164extern __inline__ void * ioremap_nocache (unsigned long phys_addr, unsigned long size)
165{
166 return __ioremap(phys_addr, size, 0);
167}
168
169extern void iounmap(void *addr);
170
171unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name);
172extern void onchip_unmap(unsigned long vaddr);
173
174static __inline__ int check_signature(volatile void __iomem *io_addr,
175 const unsigned char *signature, int length)
176{
177 int retval = 0;
178 do {
179 if (readb(io_addr) != *signature)
180 goto out;
181 io_addr++;
182 signature++;
183 length--;
184 } while (length);
185 retval = 1;
186out:
187 return retval;
188}
189
190/*
191 * The caches on some architectures aren't dma-coherent and have need to
192 * handle this in software. There are three types of operations that
193 * can be applied to dma buffers.
194 *
195 * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
196 * writing the content of the caches back to memory, if necessary.
197 * The function also invalidates the affected part of the caches as
198 * necessary before DMA transfers from outside to memory.
199 * - dma_cache_inv(start, size) invalidates the affected parts of the
200 * caches. Dirty lines of the caches may be written back or simply
201 * be discarded. This operation is necessary before dma operations
202 * to the memory.
203 * - dma_cache_wback(start, size) writes back any dirty lines but does
204 * not invalidate the cache. This can be used before DMA reads from
205 * memory.
206 */
207
208static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
209{
210 unsigned long s = start & L1_CACHE_ALIGN_MASK;
211 unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
212
213 for (; s <= e; s += L1_CACHE_BYTES)
214 asm volatile ("ocbp %0, 0" : : "r" (s));
215}
216
217static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
218{
219	/* Note that the caller has to be careful with overzealous
220	 * invalidation should there be partial cache lines at the extremities
221	 * of the specified range. */
222 unsigned long s = start & L1_CACHE_ALIGN_MASK;
223 unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
224
225 for (; s <= e; s += L1_CACHE_BYTES)
226 asm volatile ("ocbi %0, 0" : : "r" (s));
227}
228
229static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
230{
231 unsigned long s = start & L1_CACHE_ALIGN_MASK;
232 unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
233
234 for (; s <= e; s += L1_CACHE_BYTES)
235 asm volatile ("ocbwb %0, 0" : : "r" (s));
236}
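/*
 * An illustrative sketch of how the three cache maintenance helpers above
 * are typically used around DMA. The buffer addresses are assumed to come
 * from the caller, and starting the DMA engine itself is outside the scope
 * of this file; the function names here are hypothetical.
 */
static void example_dma_to_device(unsigned long buf, unsigned long len)
{
	/* The CPU filled 'buf'; push dirty lines to RAM so that the
	   device reads up-to-date data, then start the transfer. */
	dma_cache_wback(buf, len);
}

static void example_dma_from_device(unsigned long buf, unsigned long len)
{
	/* The device will write into 'buf'; discard stale cached copies
	   before the CPU reads the transferred data. Beware of partial
	   cache lines at the extremities of the range. */
	dma_cache_inv(buf, len);
}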
237
238/*
239 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
240 * access
241 */
242#define xlate_dev_mem_ptr(p) __va(p)
243
244/*
245 * Convert a virtual cached pointer to an uncached pointer
246 */
247#define xlate_dev_kmem_ptr(p) p
248
249#endif /* __KERNEL__ */
250#endif /* __ASM_SH64_IO_H */
diff --git a/include/asm-sh64/ioctl.h b/include/asm-sh64/ioctl.h
new file mode 100644
index 000000000000..c089a6fb78e0
--- /dev/null
+++ b/include/asm-sh64/ioctl.h
@@ -0,0 +1,83 @@
1#ifndef __ASM_SH64_IOCTL_H
2#define __ASM_SH64_IOCTL_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/ioctl.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 * linux/ioctl.h for Linux by H.H. Bergman.
14 *
15 */
16
17/* ioctl command encoding: 32 bits total, command in lower 16 bits,
18 * size of the parameter structure in the lower 14 bits of the
19 * upper 16 bits.
20 * Encoding the size of the parameter structure in the ioctl request
21 * is useful for catching programs compiled with old versions
22 * and to avoid overwriting user space outside the user buffer area.
23 * The highest 2 bits are reserved for indicating the ``access mode''.
24 * NOTE: This limits the max parameter size to 16kB - 1!
25 */
26
27/*
28 * The following is for compatibility across the various Linux
29 * platforms. The i386 ioctl numbering scheme doesn't really enforce
30 * a type field. De facto, however, the top 8 bits of the lower 16
31 * bits are indeed used as a type field, so we might just as well make
32 * this explicit here. Please be sure to use the decoding macros
33 * below from now on.
34 */
35#define _IOC_NRBITS 8
36#define _IOC_TYPEBITS 8
37#define _IOC_SIZEBITS 14
38#define _IOC_DIRBITS 2
39
40#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
41#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
42#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
43#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
44
45#define _IOC_NRSHIFT 0
46#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
47#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
48#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
49
50/*
51 * Direction bits.
52 */
53#define _IOC_NONE 0U
54#define _IOC_WRITE 1U
55#define _IOC_READ 2U
56
57#define _IOC(dir,type,nr,size) \
58 (((dir) << _IOC_DIRSHIFT) | \
59 ((type) << _IOC_TYPESHIFT) | \
60 ((nr) << _IOC_NRSHIFT) | \
61 ((size) << _IOC_SIZESHIFT))
62
63/* used to create numbers */
64#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
65#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
66#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
67#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
68
69/* used to decode ioctl numbers.. */
70#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
71#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
72#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
73#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
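/*
 * A worked example: FIONBIO (see ioctls.h in this same directory) is
 * _IOW('f', 126, int). With the macros above that is
 *
 *   (_IOC_WRITE << 30) | (sizeof(int) << 16) | ('f' << 8) | 126
 *   = 0x40000000       | 0x00040000          | 0x6600     | 0x7e
 *   = 0x4004667e
 *
 * and the decode macros recover the fields again:
 *   _IOC_DIR(0x4004667e)  == _IOC_WRITE
 *   _IOC_TYPE(0x4004667e) == 'f'
 *   _IOC_NR(0x4004667e)   == 126
 *   _IOC_SIZE(0x4004667e) == sizeof(int)
 */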
74
75/* ...and for the drivers/sound files... */
76
77#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
78#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
79#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
80#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
81#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
82
83#endif /* __ASM_SH64_IOCTL_H */
diff --git a/include/asm-sh64/ioctls.h b/include/asm-sh64/ioctls.h
new file mode 100644
index 000000000000..08f3c1f2e419
--- /dev/null
+++ b/include/asm-sh64/ioctls.h
@@ -0,0 +1,116 @@
1#ifndef __ASM_SH64_IOCTLS_H
2#define __ASM_SH64_IOCTLS_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/ioctls.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2004 Richard Curnow
13 *
14 */
15
16#include <asm/ioctl.h>
17
18#define FIOCLEX 0x6601 /* _IO('f', 1) */
19#define FIONCLEX 0x6602 /* _IO('f', 2) */
20#define FIOASYNC 0x4004667d /* _IOW('f', 125, int) */
21#define FIONBIO 0x4004667e /* _IOW('f', 126, int) */
22#define FIONREAD	0x8004667f	/* _IOR('f', 127, int) */
23#define TIOCINQ FIONREAD
24#define FIOQSIZE 0x80086680 /* _IOR('f', 128, loff_t) */
25
26#define TCGETS 0x5401
27#define TCSETS 0x5402
28#define TCSETSW 0x5403
29#define TCSETSF 0x5404
30
31#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
32#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
33#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
34#define TCSETAF 0x4012741c /* _IOW('t', 28, struct termio) */
35
36#define TCSBRK 0x741d /* _IO('t', 29) */
37#define TCXONC 0x741e /* _IO('t', 30) */
38#define TCFLSH 0x741f /* _IO('t', 31) */
39
40#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
41#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
42#define TIOCSTART 0x746e /* _IO('t', 110) start output, like ^Q */
43#define TIOCSTOP 0x746f /* _IO('t', 111) stop output, like ^S */
44#define TIOCOUTQ 0x80047473 /* _IOR('t', 115, int) output queue size */
45
46#define TIOCSPGRP 0x40047476 /* _IOW('t', 118, int) */
47#define TIOCGPGRP 0x80047477 /* _IOR('t', 119, int) */
48
49#define TIOCEXCL 0x540c /* _IO('T', 12) */
50#define TIOCNXCL 0x540d /* _IO('T', 13) */
51#define TIOCSCTTY 0x540e /* _IO('T', 14) */
52
53#define TIOCSTI 0x40015412 /* _IOW('T', 18, char) 0x5412 */
54#define TIOCMGET 0x80045415 /* _IOR('T', 21, unsigned int) 0x5415 */
55#define TIOCMBIS 0x40045416 /* _IOW('T', 22, unsigned int) 0x5416 */
56#define TIOCMBIC 0x40045417 /* _IOW('T', 23, unsigned int) 0x5417 */
57#define TIOCMSET 0x40045418 /* _IOW('T', 24, unsigned int) 0x5418 */
58
59#define TIOCM_LE 0x001
60#define TIOCM_DTR 0x002
61#define TIOCM_RTS 0x004
62#define TIOCM_ST 0x008
63#define TIOCM_SR 0x010
64#define TIOCM_CTS 0x020
65#define TIOCM_CAR 0x040
66#define TIOCM_RNG 0x080
67#define TIOCM_DSR 0x100
68#define TIOCM_CD TIOCM_CAR
69#define TIOCM_RI TIOCM_RNG
70
71#define TIOCGSOFTCAR 0x80045419 /* _IOR('T', 25, unsigned int) 0x5419 */
72#define TIOCSSOFTCAR 0x4004541a /* _IOW('T', 26, unsigned int) 0x541A */
73#define TIOCLINUX 0x4004541c /* _IOW('T', 28, char) 0x541C */
74#define TIOCCONS 0x541d /* _IO('T', 29) */
75#define TIOCGSERIAL 0x803c541e /* _IOR('T', 30, struct serial_struct) 0x541E */
76#define TIOCSSERIAL 0x403c541f /* _IOW('T', 31, struct serial_struct) 0x541F */
77#define TIOCPKT 0x40045420 /* _IOW('T', 32, int) 0x5420 */
78
79#define TIOCPKT_DATA 0
80#define TIOCPKT_FLUSHREAD 1
81#define TIOCPKT_FLUSHWRITE 2
82#define TIOCPKT_STOP 4
83#define TIOCPKT_START 8
84#define TIOCPKT_NOSTOP 16
85#define TIOCPKT_DOSTOP 32
86
87
88#define TIOCNOTTY 0x5422 /* _IO('T', 34) */
89#define TIOCSETD 0x40045423 /* _IOW('T', 35, int) 0x5423 */
90#define TIOCGETD 0x80045424 /* _IOR('T', 36, int) 0x5424 */
91#define TCSBRKP		0x40045425	/* _IOW('T', 37, int) 0x5425 */ /* Needed for POSIX tcsendbreak() */
92#define TIOCTTYGSTRUCT 0x8c105426 /* _IOR('T', 38, struct tty_struct) 0x5426 */ /* For debugging only */
93#define TIOCSBRK 0x5427 /* _IO('T', 39) */ /* BSD compatibility */
94#define TIOCCBRK 0x5428 /* _IO('T', 40) */ /* BSD compatibility */
95#define TIOCGSID 0x80045429 /* _IOR('T', 41, pid_t) 0x5429 */ /* Return the session ID of FD */
96#define TIOCGPTN 0x80045430 /* _IOR('T',0x30, unsigned int) 0x5430 Get Pty Number (of pty-mux device) */
97#define TIOCSPTLCK 0x40045431 /* _IOW('T',0x31, int) Lock/unlock Pty */
98
99#define TIOCSERCONFIG 0x5453 /* _IO('T', 83) */
100#define TIOCSERGWILD 0x80045454 /* _IOR('T', 84, int) 0x5454 */
101#define TIOCSERSWILD 0x40045455 /* _IOW('T', 85, int) 0x5455 */
102#define TIOCGLCKTRMIOS 0x5456
103#define TIOCSLCKTRMIOS 0x5457
104#define TIOCSERGSTRUCT 0x80d85458 /* _IOR('T', 88, struct async_struct) 0x5458 */ /* For debugging only */
105#define TIOCSERGETLSR 0x80045459 /* _IOR('T', 89, unsigned int) 0x5459 */ /* Get line status register */
106
107/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
108#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
109
110#define TIOCSERGETMULTI 0x80a8545a /* _IOR('T', 90, struct serial_multiport_struct) 0x545A */ /* Get multiport config */
111#define TIOCSERSETMULTI 0x40a8545b /* _IOW('T', 91, struct serial_multiport_struct) 0x545B */ /* Set multiport config */
112
113#define TIOCMIWAIT 0x545c /* _IO('T', 92) wait for a change on serial input line(s) */
114#define TIOCGICOUNT 0x802c545d /* _IOR('T', 93, struct async_icount) 0x545D */ /* read serial port inline interrupt counts */
115
116#endif /* __ASM_SH64_IOCTLS_H */
diff --git a/include/asm-sh64/ipc.h b/include/asm-sh64/ipc.h
new file mode 100644
index 000000000000..a46e3d9c2a3f
--- /dev/null
+++ b/include/asm-sh64/ipc.h
@@ -0,0 +1 @@
#include <asm-generic/ipc.h>
diff --git a/include/asm-sh64/ipcbuf.h b/include/asm-sh64/ipcbuf.h
new file mode 100644
index 000000000000..c441e35299c0
--- /dev/null
+++ b/include/asm-sh64/ipcbuf.h
@@ -0,0 +1,40 @@
1#ifndef __ASM_SH64_IPCBUF_H__
2#define __ASM_SH64_IPCBUF_H__
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/ipcbuf.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15/*
16 * The ipc64_perm structure for the sh64 architecture.
17 * Note extra padding because this structure is passed back and forth
18 * between kernel and user space.
19 *
20 * Pad space is left for:
21 * - 32-bit mode_t and seq
22 * - 2 miscellaneous 32-bit values
23 */
24
25struct ipc64_perm
26{
27 __kernel_key_t key;
28 __kernel_uid32_t uid;
29 __kernel_gid32_t gid;
30 __kernel_uid32_t cuid;
31 __kernel_gid32_t cgid;
32 __kernel_mode_t mode;
33 unsigned short __pad1;
34 unsigned short seq;
35 unsigned short __pad2;
36 unsigned long __unused1;
37 unsigned long __unused2;
38};
39
40#endif /* __ASM_SH64_IPCBUF_H__ */
diff --git a/include/asm-sh64/irq.h b/include/asm-sh64/irq.h
new file mode 100644
index 000000000000..f815b43df845
--- /dev/null
+++ b/include/asm-sh64/irq.h
@@ -0,0 +1,149 @@
1#ifndef __ASM_SH64_IRQ_H
2#define __ASM_SH64_IRQ_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/irq.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#include <linux/config.h>
16
17/*
18 * Encoded IRQs are not considered worth supporting.
19 * The main reason is that there's no per-encoded-interrupt
20 * enable/disable mechanism (as there was in SH3/4).
21 * An all-enabled/all-disabled scheme is worthwhile only if there's
22 * a cascaded IC to disable/enable/ack on. Until such an
23 * IC is available there's no such support.
24 *
25 * Presumably encoded IRQs could use extra IRQs beyond the 64
26 * defined below. Some logic must be added to cope with IRQ_IRL?
27 * in an exclusive way.
28 *
29 * Priorities are set at platform level; when IRQ_IRL0-3
30 * are set to 0, encoding is allowed. Otherwise it's not
31 * allowed.
32 */
33
34/* Independent IRQs */
35#define IRQ_IRL0 0
36#define IRQ_IRL1 1
37#define IRQ_IRL2 2
38#define IRQ_IRL3 3
39
40#define IRQ_INTA 4
41#define IRQ_INTB 5
42#define IRQ_INTC 6
43#define IRQ_INTD 7
44
45#define IRQ_SERR 12
46#define IRQ_ERR 13
47#define IRQ_PWR3 14
48#define IRQ_PWR2 15
49#define IRQ_PWR1 16
50#define IRQ_PWR0 17
51
52#define IRQ_DMTE0 18
53#define IRQ_DMTE1 19
54#define IRQ_DMTE2 20
55#define IRQ_DMTE3 21
56#define IRQ_DAERR 22
57
58#define IRQ_TUNI0 32
59#define IRQ_TUNI1 33
60#define IRQ_TUNI2 34
61#define IRQ_TICPI2 35
62
63#define IRQ_ATI 36
64#define IRQ_PRI 37
65#define IRQ_CUI 38
66
67#define IRQ_ERI 39
68#define IRQ_RXI 40
69#define IRQ_BRI 41
70#define IRQ_TXI 42
71
72#define IRQ_ITI 63
73
74#define NR_INTC_IRQS 64
75
76#ifdef CONFIG_SH_CAYMAN
77#define NR_EXT_IRQS 32
78#define START_EXT_IRQS 64
79
80/* PCI bus 2 uses encoded external interrupts on the Cayman board */
81#define IRQ_P2INTA (START_EXT_IRQS + (3*8) + 0)
82#define IRQ_P2INTB (START_EXT_IRQS + (3*8) + 1)
83#define IRQ_P2INTC (START_EXT_IRQS + (3*8) + 2)
84#define IRQ_P2INTD (START_EXT_IRQS + (3*8) + 3)
85
86#define I8042_KBD_IRQ (START_EXT_IRQS + 2)
87#define I8042_AUX_IRQ (START_EXT_IRQS + 6)
88
89#define IRQ_CFCARD (START_EXT_IRQS + 7)
90#define IRQ_PCMCIA (0)
91
92#else
93#define NR_EXT_IRQS 0
94#endif
95
96#define NR_IRQS (NR_INTC_IRQS+NR_EXT_IRQS)
97
98
99/* Default IRQs, fixed */
100#define TIMER_IRQ IRQ_TUNI0
101#define RTC_IRQ IRQ_CUI
102
103/* Default Priorities, Platform may choose differently */
104#define NO_PRIORITY 0 /* Disabled */
105#define TIMER_PRIORITY 2
106#define RTC_PRIORITY TIMER_PRIORITY
107#define SCIF_PRIORITY 3
108#define INTD_PRIORITY 3
109#define IRL3_PRIORITY 4
110#define INTC_PRIORITY 6
111#define IRL2_PRIORITY 7
112#define INTB_PRIORITY 9
113#define IRL1_PRIORITY 10
114#define INTA_PRIORITY 12
115#define IRL0_PRIORITY 13
116#define TOP_PRIORITY 15
117
118extern void disable_irq(unsigned int);
119extern void disable_irq_nosync(unsigned int);
120extern void enable_irq(unsigned int);
121
122extern int intc_evt_to_irq[(0xE20/0x20)+1];
123int intc_irq_describe(char* p, int irq);
124
125#define irq_canonicalize(irq) (irq)
126
127#ifdef CONFIG_SH_CAYMAN
128int cayman_irq_demux(int evt);
129int cayman_irq_describe(char* p, int irq);
130#define irq_demux(x) cayman_irq_demux(x)
131#define irq_describe(p, x) cayman_irq_describe(p, x)
132#else
133#define irq_demux(x) (intc_evt_to_irq[x])
134#define irq_describe(p, x) intc_irq_describe(p, x)
135#endif
136
137/*
138 * Function for "on chip support modules".
139 */
140
141/*
142 * SH-5 supports Priority based interrupts only.
143 * Interrupt priorities are defined at platform level.
144 */
145#define set_ipr_data(a, b, c, d)
146#define make_ipr_irq(a)
147#define make_imask_irq(a)
148
149#endif /* __ASM_SH64_IRQ_H */
diff --git a/include/asm-sh64/keyboard.h b/include/asm-sh64/keyboard.h
new file mode 100644
index 000000000000..733e2bbe7b8c
--- /dev/null
+++ b/include/asm-sh64/keyboard.h
@@ -0,0 +1,74 @@
1/*
2 * linux/include/asm-sh64/keyboard.h
3 *
4 * Copied from i386 version:
5 * Created 3 Nov 1996 by Geert Uytterhoeven
6 */
7
8/*
9 * This file contains the SH-64 architecture specific keyboard definitions
10 */
11
12#ifndef __ASM_SH64_KEYBOARD_H
13#define __ASM_SH64_KEYBOARD_H
14
15#ifdef __KERNEL__
16
17#include <linux/kernel.h>
18#include <linux/ioport.h>
19#include <asm/io.h>
20
21#ifdef CONFIG_SH_CAYMAN
22#define KEYBOARD_IRQ (START_EXT_IRQS + 2) /* SMSC SuperIO IRQ 1 */
23#endif
24#define DISABLE_KBD_DURING_INTERRUPTS 0
25
26extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
27extern int pckbd_getkeycode(unsigned int scancode);
28extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
29 char raw_mode);
30extern char pckbd_unexpected_up(unsigned char keycode);
31extern void pckbd_leds(unsigned char leds);
32extern void pckbd_init_hw(void);
33extern unsigned char pckbd_sysrq_xlate[128];
34
35#define kbd_setkeycode pckbd_setkeycode
36#define kbd_getkeycode pckbd_getkeycode
37#define kbd_translate pckbd_translate
38#define kbd_unexpected_up pckbd_unexpected_up
39#define kbd_leds pckbd_leds
40#define kbd_init_hw pckbd_init_hw
41#define kbd_sysrq_xlate pckbd_sysrq_xlate
42
43#define SYSRQ_KEY 0x54
44
45/* resource allocation */
46#define kbd_request_region()
47#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \
48 "keyboard", NULL)
49
50/* How to access the keyboard macros on this platform. */
51#define kbd_read_input() inb(KBD_DATA_REG)
52#define kbd_read_status() inb(KBD_STATUS_REG)
53#define kbd_write_output(val) outb(val, KBD_DATA_REG)
54#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
55
56/* Some stoneage hardware needs delays after some operations. */
57#define kbd_pause() do { } while(0)
58
59/*
60 * Machine specific bits for the PS/2 driver
61 */
62
63#ifdef CONFIG_SH_CAYMAN
64#define AUX_IRQ (START_EXT_IRQS + 6) /* SMSC SuperIO IRQ12 */
65#endif
66
67#define aux_request_irq(hand, dev_id) \
68 request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS2 Mouse", dev_id)
69
70#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id)
71
72#endif /* __KERNEL__ */
73#endif /* __ASM_SH64_KEYBOARD_H */
74
diff --git a/include/asm-sh64/kmap_types.h b/include/asm-sh64/kmap_types.h
new file mode 100644
index 000000000000..2ae7c7587919
--- /dev/null
+++ b/include/asm-sh64/kmap_types.h
@@ -0,0 +1,7 @@
1#ifndef __ASM_SH64_KMAP_TYPES_H
2#define __ASM_SH64_KMAP_TYPES_H
3
4#include <asm-sh/kmap_types.h>
5
6#endif /* __ASM_SH64_KMAP_TYPES_H */
7
diff --git a/include/asm-sh64/linkage.h b/include/asm-sh64/linkage.h
new file mode 100644
index 000000000000..1dd0e84a228d
--- /dev/null
+++ b/include/asm-sh64/linkage.h
@@ -0,0 +1,7 @@
1#ifndef __ASM_SH64_LINKAGE_H
2#define __ASM_SH64_LINKAGE_H
3
4#include <asm-sh/linkage.h>
5
6#endif /* __ASM_SH64_LINKAGE_H */
7
diff --git a/include/asm-sh64/local.h b/include/asm-sh64/local.h
new file mode 100644
index 000000000000..d9bd95dd36e2
--- /dev/null
+++ b/include/asm-sh64/local.h
@@ -0,0 +1,7 @@
1#ifndef __ASM_SH64_LOCAL_H
2#define __ASM_SH64_LOCAL_H
3
4#include <asm-generic/local.h>
5
6#endif /* __ASM_SH64_LOCAL_H */
7
diff --git a/include/asm-sh64/mc146818rtc.h b/include/asm-sh64/mc146818rtc.h
new file mode 100644
index 000000000000..6cd3aec68dbe
--- /dev/null
+++ b/include/asm-sh64/mc146818rtc.h
@@ -0,0 +1,7 @@
1/*
2 * linux/include/asm-sh64/mc146818rtc.h
3 *
4*/
5
6/* For now, an empty place-holder to get IDE to compile. */
7
diff --git a/include/asm-sh64/mman.h b/include/asm-sh64/mman.h
new file mode 100644
index 000000000000..a9be6d885c3e
--- /dev/null
+++ b/include/asm-sh64/mman.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_MMAN_H
2#define __ASM_SH64_MMAN_H
3
4#include <asm-sh/mman.h>
5
6#endif /* __ASM_SH64_MMAN_H */
diff --git a/include/asm-sh64/mmu.h b/include/asm-sh64/mmu.h
new file mode 100644
index 000000000000..ccd36d26615a
--- /dev/null
+++ b/include/asm-sh64/mmu.h
@@ -0,0 +1,7 @@
1#ifndef __MMU_H
2#define __MMU_H
3
4/* Default "unsigned long" context */
5typedef unsigned long mm_context_t;
6
7#endif
diff --git a/include/asm-sh64/mmu_context.h b/include/asm-sh64/mmu_context.h
new file mode 100644
index 000000000000..f062e1513272
--- /dev/null
+++ b/include/asm-sh64/mmu_context.h
@@ -0,0 +1,209 @@
1#ifndef __ASM_SH64_MMU_CONTEXT_H
2#define __ASM_SH64_MMU_CONTEXT_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/mmu_context.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 * ASID handling idea taken from MIPS implementation.
15 *
16 */
17
18#ifndef __ASSEMBLY__
19
20/*
21 * Cache of MMU context last used.
22 *
23 * The MMU "context" consists of two things:
24 * (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
25 * (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
26 */
27extern unsigned long mmu_context_cache;
28
29#include <linux/config.h>
30#include <asm/page.h>
31
32
33/* Current mm's pgd */
34extern pgd_t *mmu_pdtp_cache;
35
36#define SR_ASID_MASK 0xffffffffff00ffffULL
37#define SR_ASID_SHIFT 16
38
39#define MMU_CONTEXT_ASID_MASK 0x000000ff
40#define MMU_CONTEXT_VERSION_MASK 0xffffff00
41#define MMU_CONTEXT_FIRST_VERSION 0x00000100
42#define NO_CONTEXT 0
43
44/* ASID is 8-bit value, so it can't be 0x100 */
45#define MMU_NO_ASID 0x100
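/*
 * A worked example: if mmu_context_cache holds 0x00000305, the masks above
 * split it into TLB version 0x300 and ASID 0x05. When the low 8 bits wrap
 * around to zero, the TLB and caches are flushed and a new version is
 * started (see get_new_mmu_context() below).
 */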
46
47
48/*
49 * Virtual Page Number mask
50 */
51#define MMU_VPN_MASK 0xfffff000
52
53extern __inline__ void
54get_new_mmu_context(struct mm_struct *mm)
55{
56 extern void flush_tlb_all(void);
57 extern void flush_cache_all(void);
58
59 unsigned long mc = ++mmu_context_cache;
60
61 if (!(mc & MMU_CONTEXT_ASID_MASK)) {
62		/* We have exhausted the ASIDs of this version.
63		   Flush the whole TLB and start a new cycle. */
64 flush_tlb_all();
65 /* We have to flush all caches as ASIDs are
66 used in cache */
67 flush_cache_all();
68		/* Fix the version if needed.
69		   Note that we avoid version #0/ASID #0 so as to distinguish NO_CONTEXT. */
70 if (!mc)
71 mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
72 }
73 mm->context = mc;
74}
75
76/*
77 * Get MMU context if needed.
78 */
79static __inline__ void
80get_mmu_context(struct mm_struct *mm)
81{
82 if (mm) {
83 unsigned long mc = mmu_context_cache;
84		/* Check whether we have an old version of the context.
85		   If so, we need to get a new context with the new version. */
86 if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
87 get_new_mmu_context(mm);
88 }
89}
90
91/*
92 * Initialize the context related info for a new mm_struct
93 * instance.
94 */
95static inline int init_new_context(struct task_struct *tsk,
96 struct mm_struct *mm)
97{
98 mm->context = NO_CONTEXT;
99
100 return 0;
101}
102
103/*
104 * Destroy context related info for an mm_struct that is about
105 * to be put to rest.
106 */
107static inline void destroy_context(struct mm_struct *mm)
108{
109 extern void flush_tlb_mm(struct mm_struct *mm);
110
111 /* Well, at least free TLB entries */
112 flush_tlb_mm(mm);
113}
114
115#endif /* __ASSEMBLY__ */
116
117/* Common defines */
118#define TLB_STEP 0x00000010
119#define TLB_PTEH 0x00000000
120#define TLB_PTEL 0x00000008
121
122/* PTEH defines */
123#define PTEH_ASID_SHIFT 2
124#define PTEH_VALID 0x0000000000000001
125#define PTEH_SHARED 0x0000000000000002
126#define PTEH_MATCH_ASID 0x00000000000003ff
127
128#ifndef __ASSEMBLY__
129/* This has to be a common function because the next location to fill
130 * information is shared. */
131extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
132
133/* Profiling counter. */
134#ifdef CONFIG_SH64_PROC_TLB
135extern unsigned long long calls_to_do_fast_page_fault;
136#endif
137
138static inline unsigned long get_asid(void)
139{
140 unsigned long long sr;
141
142 asm volatile ("getcon " __SR ", %0\n\t"
143 : "=r" (sr));
144
145 sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
146 return (unsigned long) sr;
147}
148
149/* Set ASID into SR */
150static inline void set_asid(unsigned long asid)
151{
152 unsigned long long sr, pc;
153
154 asm volatile ("getcon " __SR ", %0" : "=r" (sr));
155
156 sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
157
158 /*
159	 * It is possible that this function may be inlined, so to avoid
160	 * the assembler reporting duplicate symbols we make use of the gas
161	 * trick of generating symbols using numerics and a forward reference.
162 */
163 asm volatile ("movi 1, %1\n\t"
164 "shlli %1, 28, %1\n\t"
165 "or %0, %1, %1\n\t"
166 "putcon %1, " __SR "\n\t"
167 "putcon %0, " __SSR "\n\t"
168 "movi 1f, %1\n\t"
169 "ori %1, 1 , %1\n\t"
170 "putcon %1, " __SPC "\n\t"
171 "rte\n"
172 "1:\n\t"
173 : "=r" (sr), "=r" (pc) : "0" (sr));
174}
175
176/*
177 * After we have set current->mm to a new value, this activates
178 * the context for the new mm so we see the new mappings.
179 */
180static __inline__ void activate_context(struct mm_struct *mm)
181{
182 get_mmu_context(mm);
183 set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
184}
185
186
187static __inline__ void switch_mm(struct mm_struct *prev,
188 struct mm_struct *next,
189 struct task_struct *tsk)
190{
191 if (prev != next) {
192 mmu_pdtp_cache = next->pgd;
193 activate_context(next);
194 }
195}
196
197#define deactivate_mm(tsk,mm) do { } while (0)
198
199#define activate_mm(prev, next) \
200 switch_mm((prev),(next),NULL)
201
202static inline void
203enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
204{
205}
206
207#endif /* __ASSEMBLY__ */
208
209#endif /* __ASM_SH64_MMU_CONTEXT_H */
diff --git a/include/asm-sh64/module.h b/include/asm-sh64/module.h
new file mode 100644
index 000000000000..c313650d3d93
--- /dev/null
+++ b/include/asm-sh64/module.h
@@ -0,0 +1,20 @@
1#ifndef __ASM_SH64_MODULE_H
2#define __ASM_SH64_MODULE_H
3/*
4 * This file contains the SH architecture specific module code.
5 */
6
7struct mod_arch_specific {
8 /* empty */
9};
10
11#define Elf_Shdr Elf32_Shdr
12#define Elf_Sym Elf32_Sym
13#define Elf_Ehdr Elf32_Ehdr
14
15#define module_map(x) vmalloc(x)
16#define module_unmap(x) vfree(x)
17#define module_arch_init(x) (0)
18#define arch_init_modules(x) do { } while (0)
19
20#endif /* __ASM_SH64_MODULE_H */
diff --git a/include/asm-sh64/msgbuf.h b/include/asm-sh64/msgbuf.h
new file mode 100644
index 000000000000..cf0494ce0ba8
--- /dev/null
+++ b/include/asm-sh64/msgbuf.h
@@ -0,0 +1,42 @@
1#ifndef __ASM_SH64_MSGBUF_H
2#define __ASM_SH64_MSGBUF_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/msgbuf.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15/*
16 * The msqid64_ds structure for the sh64 architecture.
17 * Note extra padding because this structure is passed back and forth
18 * between kernel and user space.
19 *
20 * Pad space is left for:
21 * - 64-bit time_t to solve y2038 problem
22 * - 2 miscellaneous 32-bit values
23 */
24
25struct msqid64_ds {
26 struct ipc64_perm msg_perm;
27 __kernel_time_t msg_stime; /* last msgsnd time */
28 unsigned long __unused1;
29 __kernel_time_t msg_rtime; /* last msgrcv time */
30 unsigned long __unused2;
31 __kernel_time_t msg_ctime; /* last change time */
32 unsigned long __unused3;
33 unsigned long msg_cbytes; /* current number of bytes on queue */
34 unsigned long msg_qnum; /* number of messages in queue */
35 unsigned long msg_qbytes; /* max number of bytes on queue */
36 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
37 __kernel_pid_t msg_lrpid; /* last receive pid */
38 unsigned long __unused4;
39 unsigned long __unused5;
40};
41
42#endif /* __ASM_SH64_MSGBUF_H */
diff --git a/include/asm-sh64/namei.h b/include/asm-sh64/namei.h
new file mode 100644
index 000000000000..99d759a805ce
--- /dev/null
+++ b/include/asm-sh64/namei.h
@@ -0,0 +1,24 @@
1#ifndef __ASM_SH64_NAMEI_H
2#define __ASM_SH64_NAMEI_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/namei.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 * Included from linux/fs/namei.c
14 *
15 */
16
17/* This dummy routine may be changed to something useful
18 * for /usr/gnemul/ emulation stuff.
19 * Look at asm-sparc/namei.h for details.
20 */
21
22#define __emul_prefix() NULL
23
24#endif /* __ASM_SH64_NAMEI_H */
diff --git a/include/asm-sh64/page.h b/include/asm-sh64/page.h
new file mode 100644
index 000000000000..e1f7f5a41210
--- /dev/null
+++ b/include/asm-sh64/page.h
@@ -0,0 +1,137 @@
1#ifndef __ASM_SH64_PAGE_H
2#define __ASM_SH64_PAGE_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/page.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003, 2004 Paul Mundt
13 *
14 * benedict.gaster@superh.com 19th, 24th July 2002.
15 *
16 * Modified to take account of enabling for D-CACHE support.
17 *
18 */
19
20#include <linux/config.h>
21
22/* PAGE_SHIFT determines the page size */
23#define PAGE_SHIFT 12
24#ifdef __ASSEMBLY__
25#define PAGE_SIZE 4096
26#else
27#define PAGE_SIZE (1UL << PAGE_SHIFT)
28#endif
29#define PAGE_MASK (~(PAGE_SIZE-1))
30#define PTE_MASK PAGE_MASK
31
32#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
33#define HPAGE_SHIFT 16
34#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
35#define HPAGE_SHIFT 20
36#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
37#define HPAGE_SHIFT 29
38#endif
39
40#ifdef CONFIG_HUGETLB_PAGE
41#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
42#define HPAGE_MASK (~(HPAGE_SIZE-1))
43#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
44#endif
45
46#ifdef __KERNEL__
47#ifndef __ASSEMBLY__
48
49extern struct page *mem_map;
50extern void sh64_page_clear(void *page);
51extern void sh64_page_copy(void *from, void *to);
52
53#define clear_page(page) sh64_page_clear(page)
54#define copy_page(to,from) sh64_page_copy(from, to)
55
56#if defined(CONFIG_DCACHE_DISABLED)
57
58#define clear_user_page(page, vaddr, pg) clear_page(page)
59#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
60
61#else
62
63extern void clear_user_page(void *to, unsigned long address, struct page *pg);
64extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
65
66#endif /* defined(CONFIG_DCACHE_DISABLED) */
67
68/*
69 * These are used to make use of C type-checking..
70 */
71typedef struct { unsigned long long pte; } pte_t;
72typedef struct { unsigned long pmd; } pmd_t;
73typedef struct { unsigned long pgd; } pgd_t;
74typedef struct { unsigned long pgprot; } pgprot_t;
75
76#define pte_val(x) ((x).pte)
77#define pmd_val(x) ((x).pmd)
78#define pgd_val(x) ((x).pgd)
79#define pgprot_val(x) ((x).pgprot)
80
81#define __pte(x) ((pte_t) { (x) } )
82#define __pmd(x) ((pmd_t) { (x) } )
83#define __pgd(x) ((pgd_t) { (x) } )
84#define __pgprot(x) ((pgprot_t) { (x) } )
85
86#endif /* !__ASSEMBLY__ */
87
88/* to align the pointer to the (next) page boundary */
89#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
90
91/*
92 * Kconfig defined.
93 */
94#define __MEMORY_START (CONFIG_MEMORY_START)
95#define PAGE_OFFSET (CONFIG_CACHED_MEMORY_OFFSET)
96
97#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
98#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
99#define MAP_NR(addr) ((__pa(addr)-__MEMORY_START) >> PAGE_SHIFT)
100#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
101
102#define phys_to_page(phys) (mem_map + (((phys) - __MEMORY_START) >> PAGE_SHIFT))
103#define page_to_phys(page) (((page - mem_map) << PAGE_SHIFT) + __MEMORY_START)
104
105/* PFN start number, because of __MEMORY_START */
106#define PFN_START (__MEMORY_START >> PAGE_SHIFT)
107
108#define pfn_to_page(pfn) (mem_map + (pfn) - PFN_START)
109#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PFN_START)
110#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
111#define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr)
112#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
113
114#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
115 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
116
117#ifndef __ASSEMBLY__
118
119/* Pure 2^n version of get_order */
120extern __inline__ int get_order(unsigned long size)
121{
122 int order;
123
124 size = (size-1) >> (PAGE_SHIFT-1);
125 order = -1;
126 do {
127 size >>= 1;
128 order++;
129 } while (size);
130 return order;
131}
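/*
 * A brief worked example: with 4kB pages, get_order(1) == 0,
 * get_order(PAGE_SIZE) == 0, get_order(PAGE_SIZE + 1) == 1 and
 * get_order(8 * PAGE_SIZE) == 3, i.e. the smallest order such that
 * 2^order pages cover 'size' bytes.
 */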
132
133#endif
134
135#endif /* __KERNEL__ */
136
137#endif /* __ASM_SH64_PAGE_H */
diff --git a/include/asm-sh64/param.h b/include/asm-sh64/param.h
new file mode 100644
index 000000000000..d18cc87c1a80
--- /dev/null
+++ b/include/asm-sh64/param.h
@@ -0,0 +1,43 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * include/asm-sh64/param.h
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2003 Paul Mundt
10 *
11 */
12#ifndef __ASM_SH64_PARAM_H
13#define __ASM_SH64_PARAM_H
14
15#include <linux/config.h>
16
17#ifdef __KERNEL__
18# ifdef CONFIG_SH_WDT
19# define HZ 1000 /* Needed for high-res WOVF */
20# else
21# define HZ 100
22# endif
23# define USER_HZ 100 /* User interfaces are in "ticks" */
24# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
25#endif
26
27#ifndef HZ
28#define HZ 100
29#endif
30
31#define EXEC_PAGESIZE 4096
32
33#ifndef NGROUPS
34#define NGROUPS 32
35#endif
36
37#ifndef NOGROUP
38#define NOGROUP (-1)
39#endif
40
41#define MAXHOSTNAMELEN 64 /* max length of hostname */
42
43#endif /* __ASM_SH64_PARAM_H */
diff --git a/include/asm-sh64/pci.h b/include/asm-sh64/pci.h
new file mode 100644
index 000000000000..8cc14e139750
--- /dev/null
+++ b/include/asm-sh64/pci.h
@@ -0,0 +1,110 @@
1#ifndef __ASM_SH64_PCI_H
2#define __ASM_SH64_PCI_H
3
4#ifdef __KERNEL__
5
6#include <linux/dma-mapping.h>
7
8/* Can be used to override the logic in pci_scan_bus for skipping
9 already-configured bus numbers - to be used for buggy BIOSes
10 or architectures with incomplete PCI setup by the loader */
11
12#define pcibios_assign_all_busses() 1
13
14/*
15 * These are currently the correct values for the STM Overdrive board.
16 * We need some board-specific way of setting this; it will likely
17 * not be the same on other boards.
18 */
19#if defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
20#define PCIBIOS_MIN_IO 0x2000
21#define PCIBIOS_MIN_MEM 0x40000000
22#endif
23
24extern void pcibios_set_master(struct pci_dev *dev);
25
26/*
27 * Set penalize isa irq function
28 */
29static inline void pcibios_penalize_isa_irq(int irq)
30{
31 /* We don't do dynamic PCI IRQ allocation */
32}
33
34/* Dynamic DMA mapping stuff.
35 * SuperH has everything mapped statically like x86.
36 */
37
38/* The PCI address space does equal the physical memory
39 * address space. The networking and block device layers use
40 * this boolean for bounce buffer decisions.
41 */
42#define PCI_DMA_BUS_IS_PHYS (1)
43
44#include <linux/types.h>
45#include <linux/slab.h>
46#include <asm/scatterlist.h>
47#include <linux/string.h>
48#include <asm/io.h>
49
50/* pci_unmap_{single,page} being a nop depends upon the
51 * configuration.
52 */
53#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
54#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
55 dma_addr_t ADDR_NAME;
56#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
57 __u32 LEN_NAME;
58#define pci_unmap_addr(PTR, ADDR_NAME) \
59 ((PTR)->ADDR_NAME)
60#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
61 (((PTR)->ADDR_NAME) = (VAL))
62#define pci_unmap_len(PTR, LEN_NAME) \
63 ((PTR)->LEN_NAME)
64#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
65 (((PTR)->LEN_NAME) = (VAL))
66#else
67#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
68#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
69#define pci_unmap_addr(PTR, ADDR_NAME) (0)
70#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
71#define pci_unmap_len(PTR, LEN_NAME) (0)
72#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
73#endif
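/*
 * An illustrative sketch of the intended use of the macros above: a driver
 * keeps the unmap bookkeeping inside its own per-buffer structure and only
 * touches it through the macros, so the fields vanish entirely when the
 * platform does not need them. The structure, field and variable names
 * here are hypothetical.
 */
struct example_rx_slot {
	void *buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(len)
};

/* After mapping:
 *	pci_unmap_addr_set(slot, mapping, dma_handle);
 *	pci_unmap_len_set(slot, len, buf_len);
 * and when tearing the mapping down:
 *	pci_unmap_single(pdev, pci_unmap_addr(slot, mapping),
 *			 pci_unmap_len(slot, len), PCI_DMA_FROMDEVICE);
 */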
74
75/* Not supporting more than 32-bit PCI bus addresses now, but
76 * must satisfy references to this function. Change if needed.
77 */
78#define pci_dac_dma_supported(pci_dev, mask) (0)
79
80/* These macros should be used after a pci_map_sg call has been done
81 * to get bus addresses of each of the SG entries and their lengths.
82 * You should only work with the number of sg entries pci_map_sg
83 * returns, or alternatively stop on the first sg_dma_len(sg) which
84 * is 0.
85 */
86#define sg_dma_address(sg) ((sg)->dma_address)
87#define sg_dma_len(sg) ((sg)->length)
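/*
 * An illustrative sketch of walking the entries that pci_map_sg() actually
 * mapped, using the accessors above. The function name is hypothetical;
 * only the mapped lengths are summed here, whereas a real driver would
 * hand sg_dma_address() of each entry to the device.
 */
static unsigned long example_total_mapped(struct pci_dev *dev,
					  struct scatterlist *sglist, int nents)
{
	unsigned long total = 0;
	int mapped, i;

	mapped = pci_map_sg(dev, sglist, nents, PCI_DMA_TODEVICE);
	for (i = 0; i < mapped; i++)
		total += sg_dma_len(&sglist[i]);
	pci_unmap_sg(dev, sglist, nents, PCI_DMA_TODEVICE);

	return total;
}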
88
89/* Board-specific fixup routines. */
90extern void pcibios_fixup(void);
91extern void pcibios_fixup_irqs(void);
92
93#ifdef CONFIG_PCI_AUTO
94extern int pciauto_assign_resources(int busno, struct pci_channel *hose);
95#endif
96
97static inline void pcibios_add_platform_entries(struct pci_dev *dev)
98{
99}
100
101#endif /* __KERNEL__ */
102
103/* generic pci stuff */
104#include <asm-generic/pci.h>
105
106/* generic DMA-mapping stuff */
107#include <asm-generic/pci-dma-compat.h>
108
109#endif /* __ASM_SH64_PCI_H */
110
diff --git a/include/asm-sh64/percpu.h b/include/asm-sh64/percpu.h
new file mode 100644
index 000000000000..a01d16cd0e8c
--- /dev/null
+++ b/include/asm-sh64/percpu.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_PERCPU
2#define __ASM_SH64_PERCPU
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ASM_SH64_PERCPU */
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
new file mode 100644
index 000000000000..b25f5df5535c
--- /dev/null
+++ b/include/asm-sh64/pgalloc.h
@@ -0,0 +1,195 @@
1#ifndef __ASM_SH64_PGALLOC_H
2#define __ASM_SH64_PGALLOC_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/pgalloc.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003, 2004 Paul Mundt
13 * Copyright (C) 2003, 2004 Richard Curnow
14 *
15 */
16
17#include <linux/threads.h>
18#include <linux/mm.h>
19
20#define pgd_quicklist (current_cpu_data.pgd_quick)
21#define pmd_quicklist (current_cpu_data.pmd_quick)
22#define pte_quicklist (current_cpu_data.pte_quick)
23#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
24
25static inline void pgd_init(unsigned long page)
26{
27 unsigned long *pgd = (unsigned long *)page;
28 extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
29 int i;
30
31 for (i = 0; i < USER_PTRS_PER_PGD; i++)
32 pgd[i] = (unsigned long)empty_bad_pte_table;
33}
34
35/*
36 * Allocate and free page tables. The xxx_kernel() versions are
37 * used to allocate a kernel page table - this turns on ASN bits
38 * if any.
39 */
40
41extern __inline__ pgd_t *get_pgd_slow(void)
42{
43 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
44 pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
45 return ret;
46}
47
48extern __inline__ pgd_t *get_pgd_fast(void)
49{
50 unsigned long *ret;
51
52 if ((ret = pgd_quicklist) != NULL) {
53 pgd_quicklist = (unsigned long *)(*ret);
54 ret[0] = 0;
55 pgtable_cache_size--;
56 } else
57 ret = (unsigned long *)get_pgd_slow();
58
59 if (ret) {
60 memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
61 }
62 return (pgd_t *)ret;
63}
64
65extern __inline__ void free_pgd_fast(pgd_t *pgd)
66{
67 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
68 pgd_quicklist = (unsigned long *) pgd;
69 pgtable_cache_size++;
70}
71
72extern __inline__ void free_pgd_slow(pgd_t *pgd)
73{
74 kfree((void *)pgd);
75}
76
77extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
78extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
79
80extern __inline__ pte_t *get_pte_fast(void)
81{
82 unsigned long *ret;
83
84 if((ret = (unsigned long *)pte_quicklist) != NULL) {
85 pte_quicklist = (unsigned long *)(*ret);
86 ret[0] = ret[1];
87 pgtable_cache_size--;
88 }
89 return (pte_t *)ret;
90}
91
92extern __inline__ void free_pte_fast(pte_t *pte)
93{
94 *(unsigned long *)pte = (unsigned long) pte_quicklist;
95 pte_quicklist = (unsigned long *) pte;
96 pgtable_cache_size++;
97}
98
99static inline void pte_free_kernel(pte_t *pte)
100{
101 free_page((unsigned long)pte);
102}
103
104static inline void pte_free(struct page *pte)
105{
106 __free_page(pte);
107}
108
109static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
110 unsigned long address)
111{
112 pte_t *pte;
113
114 pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT|__GFP_ZERO);
115
116 return pte;
117}
118
119static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
120{
121 struct page *pte;
122
123 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
124
125 return pte;
126}
127
128#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
129
130/*
131 * allocating and freeing a pmd is trivial: the 1-entry pmd is
132 * inside the pgd, so has no extra memory associated with it.
133 */
134
135#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
136
137#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
138#define pmd_free(x) do { } while (0)
139#define pgd_populate(mm, pmd, pte) BUG()
140#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
141#define __pmd_free_tlb(tlb,pmd) do { } while (0)
142
143#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
144
145static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
146{
147 pmd_t *pmd;
148 pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
149 return pmd;
150}
151
152static __inline__ void pmd_free(pmd_t *pmd)
153{
154 free_page((unsigned long) pmd);
155}
156
157#define pgd_populate(mm, pgd, pmd) pgd_set(pgd, pmd)
158#define __pmd_free_tlb(tlb,pmd) pmd_free(pmd)
159
160#else
161#error "No defined page table size"
162#endif
163
164#define check_pgt_cache() do { } while (0)
165#define pgd_free(pgd) free_pgd_slow(pgd)
166#define pgd_alloc(mm) get_pgd_fast()
167
168extern int do_check_pgt_cache(int, int);
169
170extern inline void set_pgdir(unsigned long address, pgd_t entry)
171{
172 struct task_struct * p;
173 pgd_t *pgd;
174
175 read_lock(&tasklist_lock);
176 for_each_process(p) {
177 if (!p->mm)
178 continue;
179 *pgd_offset(p->mm,address) = entry;
180 }
181 read_unlock(&tasklist_lock);
182 for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
183 pgd[address >> PGDIR_SHIFT] = entry;
184}
185
186#define pmd_populate_kernel(mm, pmd, pte) \
187 set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))
188
189static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
190 struct page *pte)
191{
192 set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address (pte)));
193}
194
195#endif /* __ASM_SH64_PGALLOC_H */
diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h
new file mode 100644
index 000000000000..45f70c0f4a5e
--- /dev/null
+++ b/include/asm-sh64/pgtable.h
@@ -0,0 +1,508 @@
1#ifndef __ASM_SH64_PGTABLE_H
2#define __ASM_SH64_PGTABLE_H
3
4#include <asm-generic/4level-fixup.h>
5
6/*
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * include/asm-sh64/pgtable.h
12 *
13 * Copyright (C) 2000, 2001 Paolo Alberelli
14 * Copyright (C) 2003, 2004 Paul Mundt
15 * Copyright (C) 2003, 2004 Richard Curnow
16 *
17 * This file contains the functions and defines necessary to modify and use
18 * the SuperH page table tree.
19 */
20
21#ifndef __ASSEMBLY__
22#include <asm/processor.h>
23#include <asm/page.h>
24#include <linux/threads.h>
25#include <linux/config.h>
26
27extern void paging_init(void);
28
29/* We provide our own get_unmapped_area to avoid cache synonym issue */
30#define HAVE_ARCH_UNMAPPED_AREA
31
32/*
33 * Basically we have the same two-level (which is the logical three level
34 * Linux page table layout folded) page tables as the i386.
35 */
36
37/*
38 * ZERO_PAGE is a global shared page that is always zero: used
39 * for zero-mapped memory areas etc..
40 */
41extern unsigned char empty_zero_page[PAGE_SIZE];
42#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
43
44#endif /* !__ASSEMBLY__ */
45
46/*
47 * NEFF and NPHYS related defines.
48 * FIXME : These need to be model-dependent. For now this is OK, SH5-101 and SH5-103
49 * implement 32 bits effective and 32 bits physical. But future implementations may
50 * extend beyond this.
51 */
52#define NEFF 32
53#define NEFF_SIGN (1LL << (NEFF - 1))
54#define NEFF_MASK (-1LL << NEFF)
55
56#define NPHYS 32
57#define NPHYS_SIGN (1LL << (NPHYS - 1))
58#define NPHYS_MASK (-1LL << NPHYS)
59
60/* Typically 2-level is sufficient for up to 32 bits of virtual address space; beyond
61   that, 3-level would be appropriate. */
62#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
63/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
64#define PTRS_PER_PTE ((1<<PAGE_SHIFT)/sizeof(unsigned long long))
65#define PTE_MAGNITUDE 3 /* sizeof(unsigned long long) magnit. */
66#define PTE_SHIFT PAGE_SHIFT
67#define PTE_BITS (PAGE_SHIFT - PTE_MAGNITUDE)
68
69/* top level: PGD. */
70#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
71#define PGD_BITS (NEFF - PGDIR_SHIFT)
72#define PTRS_PER_PGD (1<<PGD_BITS)
73
74/* middle level: PMD. This doesn't do anything for the 2-level case. */
75#define PTRS_PER_PMD (1)
76
77#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
78#define PGDIR_MASK (~(PGDIR_SIZE-1))
79#define PMD_SHIFT PGDIR_SHIFT
80#define PMD_SIZE PGDIR_SIZE
81#define PMD_MASK PGDIR_MASK
82
83#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
84/*
85 * three-level asymmetric paging structure: PGD is top level.
86 * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
87 */
88/* bottom level: PTE. It's 9 bits = 512 pointers */
89#define PTRS_PER_PTE ((1<<PAGE_SHIFT)/sizeof(unsigned long long))
90#define PTE_MAGNITUDE 3 /* sizeof(unsigned long long) magnit. */
91#define PTE_SHIFT PAGE_SHIFT
92#define PTE_BITS (PAGE_SHIFT - PTE_MAGNITUDE)
93
94/* middle level: PMD. It's 10 bits = 1024 pointers */
95#define PTRS_PER_PMD ((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
96#define PMD_MAGNITUDE 2 /* sizeof(unsigned long long *) magnit. */
97#define PMD_SHIFT (PTE_SHIFT + PTE_BITS)
98#define PMD_BITS (PAGE_SHIFT - PMD_MAGNITUDE)
99
100/* top level: PGD. It's 1 bit = 2 pointers */
101#define PGDIR_SHIFT (PMD_SHIFT + PMD_BITS)
102#define PGD_BITS (NEFF - PGDIR_SHIFT)
103#define PTRS_PER_PGD (1<<PGD_BITS)
104
105#define PMD_SIZE (1UL << PMD_SHIFT)
106#define PMD_MASK (~(PMD_SIZE-1))
107#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
108#define PGDIR_MASK (~(PGDIR_SIZE-1))
109
110#else
111#error "No defined number of page table levels"
112#endif
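/*
 * A worked example, assuming 4kB pages and NEFF == 32:
 *
 *   2-level: PTE_BITS = 12 - 3 = 9, so PTRS_PER_PTE = 512,
 *            PGDIR_SHIFT = 21, PGD_BITS = 11, PTRS_PER_PGD = 2048.
 *   3-level: PMD_SHIFT = 21, PMD_BITS = 12 - 2 = 10, PTRS_PER_PMD = 1024,
 *            PGDIR_SHIFT = 31, PGD_BITS = 1, PTRS_PER_PGD = 2.
 */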
113
114/*
115 * Error outputs.
116 */
117#define pte_ERROR(e) \
118 printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
119#define pmd_ERROR(e) \
120 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
121#define pgd_ERROR(e) \
122 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
123
124/*
125 * Table setting routines. Used within arch/mm only.
126 */
127#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
128#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
129
130static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
131{
132 unsigned long long x = ((unsigned long long) pteval.pte);
133 unsigned long long *xp = (unsigned long long *) pteptr;
134 /*
135 * Sign-extend based on NPHYS.
136 */
137 *(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
138}
139#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
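/*
 * A brief worked example: with NPHYS == 32, set_pte() of the value
 * 0x80000007 (bit 31 set) stores 0xffffffff80000007 in the page table,
 * while 0x40000007 is stored unchanged; i.e. the entry is sign-extended
 * from bit NPHYS - 1.
 */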
140
141static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
142{
143 pmd_val(*pmdp) = (unsigned long) ptep;
144}
145
146/*
147 * PGD defines. Top level.
148 */
149
150/* To find an entry in a generic PGD. */
151#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
152#define __pgd_offset(address) pgd_index(address)
153#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
154
155/* To find an entry in a kernel PGD. */
156#define pgd_offset_k(address) pgd_offset(&init_mm, address)
157
158/*
159 * PGD level access routines.
160 *
161 * Note 1:
162 * There's no need to use physical addresses since the tree walk is
163 * performed entirely in software, up until the PTE translation.
164 *
165 * Note 2:
166 * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
167 * clear (_PGD_EMPTY), or present. When present, its lower 3 nibbles
168 * contain _KERNPG_TABLE. Since it is a kernel virtual pointer, bit 31
169 * must also be 1. Assuming an arbitrary clear value with bit 31 set
170 * to 0 and the lower 3 nibbles set to 0xFFF (_PGD_EMPTY), any other
171 * value is a bad pgd that must be reported via printk().
172 *
173 */
174#define _PGD_EMPTY 0x0
175
176#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
177static inline int pgd_none(pgd_t pgd) { return 0; }
178static inline int pgd_bad(pgd_t pgd) { return 0; }
179#define pgd_present(pgd) ((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0)
180#define pgd_clear(xx) do { } while(0)
181
182#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
183#define pgd_present(pgd_entry) (1)
184#define pgd_none(pgd_entry) (pgd_val((pgd_entry)) == _PGD_EMPTY)
185/* TODO: Think later about what a useful definition of 'bad' would be now. */
186#define pgd_bad(pgd_entry) (0)
187#define pgd_clear(pgd_entry_p) (set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))
188
189#endif
190
191
192#define pgd_page(pgd_entry) ((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))
193
194/*
195 * PMD defines. Middle level.
196 */
197
198/* PGD to PMD dereferencing */
199#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
200static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
201{
202 return (pmd_t *) dir;
203}
204#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
205#define __pmd_offset(address) \
206 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
207#define pmd_offset(dir, addr) \
208 ((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))
209#endif
210
211/*
212 * PMD level access routines. Same notes as above.
213 */
214#define _PMD_EMPTY 0x0
215/* Either the PMD is empty or present; it's not paged out */
216#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
217#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
218#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
219#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
220
221#define pmd_page_kernel(pmd_entry) \
222 ((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
223
224#define pmd_page(pmd) \
225 (virt_to_page(pmd_val(pmd)))
226
227/* PMD to PTE dereferencing */
228#define pte_index(address) \
229 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
230
231#define pte_offset_kernel(dir, addr) \
232 ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
233
234#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
235#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)
236#define pte_unmap(pte) do { } while (0)
237#define pte_unmap_nested(pte) do { } while (0)
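/*
 * An illustrative sketch: a software walk from an mm and a virtual address
 * down to the pte, using the accessors defined above. Locking and the
 * pgd/pmd/pte "none" checks are omitted for brevity; the function name is
 * hypothetical.
 */
static inline pte_t example_lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd = pmd_offset(pgd, addr);
	pte_t *pte = pte_offset_kernel(pmd, addr);

	return *pte;
}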
238
239/* Round it up ! */
240#define USER_PTRS_PER_PGD ((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
241#define FIRST_USER_PGD_NR 0
242
243#ifndef __ASSEMBLY__
244#define VMALLOC_END 0xff000000
245#define VMALLOC_START 0xf0000000
246#define VMALLOC_VMADDR(x) ((unsigned long)(x))
247
248#define IOBASE_VADDR 0xff000000
249#define IOBASE_END 0xffffffff
250
251/*
252 * PTEL coherent flags.
253 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
254 */
255/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
256 positions, to avoid expensive bit shuffling on every refill. The remaining
257 bits are used for s/w purposes and masked out on each refill.
258
259 Note, the PTE slots are used to hold data of type swp_entry_t when a page is
260 swapped out. Only the _PAGE_PRESENT flag is significant when the page is
261 swapped out, and it must be placed so that it doesn't overlap either the
262 type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
263 at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
264 scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
265 [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
266 into 2 pieces. That is handled by SWP_ENTRY and SWP_TYPE below. */
267#define _PAGE_WT 0x001 /* CB0: if cacheable, 1->write-thru, 0->write-back */
268#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
269#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
270#define _PAGE_PRESENT 0x004 /* software: page referenced */
271#define _PAGE_FILE 0x004 /* software: only when !present */
272#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
273#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
274#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
275#define _PAGE_READ 0x040 /* PR0-bit : read access allowed */
276#define _PAGE_EXECUTE 0x080 /* PR1-bit : execute access allowed */
277#define _PAGE_WRITE 0x100 /* PR2-bit : write access allowed */
278#define _PAGE_USER 0x200 /* PR3-bit : user space access allowed */
279#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
280#define _PAGE_ACCESSED 0x800 /* software: page referenced */
281
282/* Mask which drops software flags */
283#define _PAGE_FLAGS_HARDWARE_MASK 0xfffffffffffff3dbLL
284
285/*
286 * HugeTLB support
287 */
288#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
289#define _PAGE_SZHUGE (_PAGE_SIZE0)
290#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
291#define _PAGE_SZHUGE (_PAGE_SIZE1)
292#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
293#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
294#endif
295
296/*
297 * Default flags for a Kernel page.
298 * This is fundamentally also SHARED because the main use of this define
299 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
300 * contextless.
301 *
302 * _PAGE_EXECUTE is required for modules
303 *
304 */
305#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
306 _PAGE_EXECUTE | \
307 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
308 _PAGE_SHARED)
309
310/* Default flags for a User page */
311#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
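/*
 * A worked example: with the flag values above, _KERNPG_TABLE evaluates
 * to 0xde6 and _PAGE_TABLE to 0xfe6.
 */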
312
313#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
314
315#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
316#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
317 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \
318 _PAGE_SHARED)
319/* We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
320 * protection mode for the stack. */
321#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
322 _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE)
323#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
324 _PAGE_ACCESSED | _PAGE_USER)
325#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
326
327
328/*
329 * In ST50 we have full permissions (Read/Write/Execute/Shared).
330 * Just match'em all. These are for mmap(), therefore all at least
331 * User/Cachable/Present/Accessed. No point in making Fault on Write.
332 */
333#define __MMAP_COMMON (_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
334 /* sxwr */
335#define __P000 __pgprot(__MMAP_COMMON)
336#define __P001 __pgprot(__MMAP_COMMON | _PAGE_READ)
337#define __P010 __pgprot(__MMAP_COMMON)
338#define __P011 __pgprot(__MMAP_COMMON | _PAGE_READ)
339#define __P100 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
340#define __P101 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
341#define __P110 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
342#define __P111 __pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
343
344#define __S000 __pgprot(__MMAP_COMMON | _PAGE_SHARED)
345#define __S001 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ)
346#define __S010 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE)
347#define __S011 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE)
348#define __S100 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE)
349#define __S101 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ)
350#define __S110 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE)
351#define __S111 __pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE)
352
353/* Make it a device mapping for maximum safety (e.g. for mapping device
354 registers into user-space via /dev/map). */
355#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
356#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
357
358/*
359 * Handling allocation failures during page table setup.
360 */
361extern void __handle_bad_pmd_kernel(pmd_t * pmd);
362#define __handle_bad_pmd(x) __handle_bad_pmd_kernel(x)
363
364/*
365 * PTE level access routines.
366 *
367 * Note 1:
368 * It's the tree walk leaf. This is the physical address to be stored.
369 *
370 * Note 2:
371 * Regarding the choice of _PTE_EMPTY:
372 *
373 * We must choose a bit pattern that cannot be valid, whether or not the page
374 * is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
375 * out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
376 * left for us to select. If we force bit[7]==0 when swapped out, we could use
377 * the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
378 * we force bit[7]==1 when swapped out, we can use all zeroes to indicate
379 * empty. This is convenient, because the page tables get cleared to zero
380 * when they are allocated.
381 *
382 */
383#define _PTE_EMPTY 0x0
384#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
385#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
386#define pte_none(x) (pte_val(x) == _PTE_EMPTY)
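Since _PTE_EMPTY is the all-zero pattern, a freshly allocated, zero-filled page table already consists entirely of "none" entries, which is exactly the convenience the note above describes. A hypothetical self-check (the function name is invented for this sketch; BUG_ON() comes from <linux/kernel.h>) showing how the predicates relate:

static inline void example_check_empty_pte(void)
{
	pte_t pte = __pte(_PTE_EMPTY);

	BUG_ON(pte_present(pte));	/* bit[2] (_PAGE_PRESENT) is clear        */
	BUG_ON(!pte_none(pte));		/* the all-zero pattern means "no mapping" */
}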
387
388/*
389 * Some definitions to translate between mem_map, PTEs, and page
390 * addresses:
391 */
392
393/*
394 * Given a PTE, return the index of the mem_map[] entry corresponding
395 * to the page frame referenced by the PTE. Take the absolute physical
396 * address, make it relative, and translate it to an index.
397 */
398#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
399 __MEMORY_START) >> PAGE_SHIFT)
400
401/*
402 * Given a PTE, return the "struct page *".
403 */
404#define pte_page(x) (mem_map + pte_pagenr(x))
405
406/*
407 * Return the number of (rounded-down) MB corresponding to x pages.
408 */
409#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
410
411
412/*
413 * The following have defined behavior only if pte_present() is true.
414 */
415static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
416static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXECUTE; }
417static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
418static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
419static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
420static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }
421
422extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
423extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
424extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
425extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
426extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
427
428extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
429extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
430extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
431extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
432extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
433
434/*
435 * Conversion functions: convert a page and protection to a page entry.
436 *
437 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
438 */
439#define mk_pte(page,pgprot) \
440({ \
441 pte_t __pte; \
442 \
443 set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
444 __MEMORY_START | pgprot_val((pgprot)))); \
445 __pte; \
446})
447
448/*
449 * This takes an (absolute) physical page address that is used
450 * by the remapping functions
451 */
452#define mk_pte_phys(physpage, pgprot) \
453({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
454
455extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
456{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
457
458#define page_pte_prot(page, prot) mk_pte(page, prot)
459#define page_pte(page) page_pte_prot(page, __pgprot(0))
460
461typedef pte_t *pte_addr_t;
462#define pgtable_cache_init() do { } while (0)
463
464extern void update_mmu_cache(struct vm_area_struct * vma,
465 unsigned long address, pte_t pte);
466
467/* Encode and decode a swap entry */
468#define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
469#define __swp_offset(x) ((x).val >> 8)
470#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
471#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
472#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
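The packing above keeps bit[2] (_PAGE_PRESENT) and bit[7] clear in the resulting PTE, so a swapped-out entry can never be mistaken for a present one, matching the _PTE_EMPTY discussion earlier in the file. A rough round-trip sketch with invented values (BUG_ON() from <linux/kernel.h>):

void example_swap_roundtrip(void)
{
	unsigned long type = 5, offset = 0x1234;
	swp_entry_t entry = __swp_entry(type, offset);
	pte_t pte = __swp_entry_to_pte(entry);

	BUG_ON(pte_present(pte));	/* a packed swap PTE never looks present */
	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != offset);
}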
473
474/* Encode and decode a nonlinear file mapping entry */
475#define PTE_FILE_MAX_BITS 29
476#define pte_to_pgoff(pte) (pte_val(pte))
477#define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE })
478
479/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
480#define PageSkip(page) (0)
481#define kern_addr_valid(addr) (1)
482
483#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
484 remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
485
486#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
487 remap_pfn_range(vma, vaddr, pfn, size, prot)
488
489#define MK_IOSPACE_PFN(space, pfn) (pfn)
490#define GET_IOSPACE(pfn) 0
491#define GET_PFN(pfn) (pfn)
492
493#endif /* !__ASSEMBLY__ */
494
495/*
496 * No page table caches to initialise
497 */
498#define pgtable_cache_init() do { } while (0)
499
500#define pte_pfn(x) (((unsigned long)((x).pte)) >> PAGE_SHIFT)
501#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
502#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
503
504extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
505
506#include <asm-generic/pgtable.h>
507
508#endif /* __ASM_SH64_PGTABLE_H */
diff --git a/include/asm-sh64/platform.h b/include/asm-sh64/platform.h
new file mode 100644
index 000000000000..7046a9014027
--- /dev/null
+++ b/include/asm-sh64/platform.h
@@ -0,0 +1,69 @@
1#ifndef __ASM_SH64_PLATFORM_H
2#define __ASM_SH64_PLATFORM_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/platform.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 * benedict.gaster@superh.com: 3rd May 2002
14 * Added support for ramdisk, removing statically linked romfs at the same time.
15 */
16
17#include <linux/ioport.h>
18#include <asm/irq.h>
19
20
21/*
22 * Platform definition structure.
23 */
24struct sh64_platform {
25 unsigned int readonly_rootfs;
26 unsigned int ramdisk_flags;
27 unsigned int initial_root_dev;
28 unsigned int loader_type;
29 unsigned int initrd_start;
30 unsigned int initrd_size;
31 unsigned int fpu_flags;
32 unsigned int io_res_count;
33 unsigned int kram_res_count;
34 unsigned int xram_res_count;
35 unsigned int rom_res_count;
36 struct resource *io_res_p;
37 struct resource *kram_res_p;
38 struct resource *xram_res_p;
39 struct resource *rom_res_p;
40};
41
42extern struct sh64_platform platform_parms;
43
44extern unsigned long long memory_start, memory_end;
45
46extern unsigned long long fpu_in_use;
47
48extern int platform_int_priority[NR_INTC_IRQS];
49
50#define FPU_FLAGS (platform_parms.fpu_flags)
51#define STANDARD_IO_RESOURCES (platform_parms.io_res_count)
52#define STANDARD_KRAM_RESOURCES (platform_parms.kram_res_count)
53#define STANDARD_XRAM_RESOURCES (platform_parms.xram_res_count)
54#define STANDARD_ROM_RESOURCES (platform_parms.rom_res_count)
55
56/*
57 * Kernel memory descriptors, respectively:
58 * code = last but one memory descriptor
59 * data = last memory descriptor
60 */
61#define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2])
62#define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1])
63
64/* Be prepared for 64-bit sign extensions */
65#define PFN_UP(x) ((((x) + PAGE_SIZE-1) >> PAGE_SHIFT) & 0x000fffff)
66#define PFN_DOWN(x) (((x) >> PAGE_SHIFT) & 0x000fffff)
67#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
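The & 0x000fffff mask in PFN_UP()/PFN_DOWN() is what the comment alludes to: if a 32-bit physical address has been sign-extended to 64 bits, the stray upper bits are stripped, capping the result at 2^20 frames (4 GB with 4 KB pages). A small worked example, purely illustrative:

unsigned long example_pfn(void)
{
	/* 0x80000000 sign-extended to 64 bits */
	unsigned long long addr = 0xffffffff80000000ULL;

	/* The raw shift would keep the extended bits (0xffffffff80000);
	 * the mask reduces it to 0x80000, assuming PAGE_SHIFT == 12. */
	return PFN_DOWN(addr);
}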
68
69#endif /* __ASM_SH64_PLATFORM_H */
diff --git a/include/asm-sh64/poll.h b/include/asm-sh64/poll.h
new file mode 100644
index 000000000000..a420d14eb704
--- /dev/null
+++ b/include/asm-sh64/poll.h
@@ -0,0 +1,36 @@
1#ifndef __ASM_SH64_POLL_H
2#define __ASM_SH64_POLL_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/poll.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15/* These are specified by iBCS2 */
16#define POLLIN 0x0001
17#define POLLPRI 0x0002
18#define POLLOUT 0x0004
19#define POLLERR 0x0008
20#define POLLHUP 0x0010
21#define POLLNVAL 0x0020
22
23/* The rest seem to be more-or-less nonstandard. Check them! */
24#define POLLRDNORM 0x0040
25#define POLLRDBAND 0x0080
26#define POLLWRNORM 0x0100
27#define POLLWRBAND 0x0200
28#define POLLMSG 0x0400
29
30struct pollfd {
31 int fd;
32 short events;
33 short revents;
34};
35
36#endif /* __ASM_SH64_POLL_H */
diff --git a/include/asm-sh64/posix_types.h b/include/asm-sh64/posix_types.h
new file mode 100644
index 000000000000..0620317a6f0f
--- /dev/null
+++ b/include/asm-sh64/posix_types.h
@@ -0,0 +1,131 @@
1#ifndef __ASM_SH64_POSIX_TYPES_H
2#define __ASM_SH64_POSIX_TYPES_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/posix_types.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 * This file is generally used by user-level software, so you need to
15 * be a little careful about namespace pollution etc. Also, we cannot
16 * assume GCC is being used.
17 */
18
19typedef unsigned long __kernel_ino_t;
20typedef unsigned short __kernel_mode_t;
21typedef unsigned short __kernel_nlink_t;
22typedef long __kernel_off_t;
23typedef int __kernel_pid_t;
24typedef unsigned short __kernel_ipc_pid_t;
25typedef unsigned short __kernel_uid_t;
26typedef unsigned short __kernel_gid_t;
27typedef long unsigned int __kernel_size_t;
28typedef int __kernel_ssize_t;
29typedef int __kernel_ptrdiff_t;
30typedef long __kernel_time_t;
31typedef long __kernel_suseconds_t;
32typedef long __kernel_clock_t;
33typedef int __kernel_timer_t;
34typedef int __kernel_clockid_t;
35typedef int __kernel_daddr_t;
36typedef char * __kernel_caddr_t;
37typedef unsigned short __kernel_uid16_t;
38typedef unsigned short __kernel_gid16_t;
39typedef unsigned int __kernel_uid32_t;
40typedef unsigned int __kernel_gid32_t;
41
42typedef unsigned short __kernel_old_uid_t;
43typedef unsigned short __kernel_old_gid_t;
44typedef unsigned short __kernel_old_dev_t;
45
46#ifdef __GNUC__
47typedef long long __kernel_loff_t;
48#endif
49
50typedef struct {
51#if defined(__KERNEL__) || defined(__USE_ALL)
52 int val[2];
53#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
54 int __val[2];
55#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
56} __kernel_fsid_t;
57
58#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
59
60#undef __FD_SET
61static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
62{
63 unsigned long __tmp = __fd / __NFDBITS;
64 unsigned long __rem = __fd % __NFDBITS;
65 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
66}
67
68#undef __FD_CLR
69static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
70{
71 unsigned long __tmp = __fd / __NFDBITS;
72 unsigned long __rem = __fd % __NFDBITS;
73 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
74}
75
76
77#undef __FD_ISSET
78static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
79{
80 unsigned long __tmp = __fd / __NFDBITS;
81 unsigned long __rem = __fd % __NFDBITS;
82 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
83}
84
85/*
86 * This will unroll the loop for the normal constant case (8 ints,
87 * for a 256-bit fd_set)
88 */
89#undef __FD_ZERO
90static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
91{
92 unsigned long *__tmp = __p->fds_bits;
93 int __i;
94
95 if (__builtin_constant_p(__FDSET_LONGS)) {
96 switch (__FDSET_LONGS) {
97 case 16:
98 __tmp[ 0] = 0; __tmp[ 1] = 0;
99 __tmp[ 2] = 0; __tmp[ 3] = 0;
100 __tmp[ 4] = 0; __tmp[ 5] = 0;
101 __tmp[ 6] = 0; __tmp[ 7] = 0;
102 __tmp[ 8] = 0; __tmp[ 9] = 0;
103 __tmp[10] = 0; __tmp[11] = 0;
104 __tmp[12] = 0; __tmp[13] = 0;
105 __tmp[14] = 0; __tmp[15] = 0;
106 return;
107
108 case 8:
109 __tmp[ 0] = 0; __tmp[ 1] = 0;
110 __tmp[ 2] = 0; __tmp[ 3] = 0;
111 __tmp[ 4] = 0; __tmp[ 5] = 0;
112 __tmp[ 6] = 0; __tmp[ 7] = 0;
113 return;
114
115 case 4:
116 __tmp[ 0] = 0; __tmp[ 1] = 0;
117 __tmp[ 2] = 0; __tmp[ 3] = 0;
118 return;
119 }
120 }
121 __i = __FDSET_LONGS;
122 while (__i) {
123 __i--;
124 *__tmp = 0;
125 __tmp++;
126 }
127}
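These are the low-level helpers behind the generic FD_ZERO()/FD_SET()/FD_ISSET()/FD_CLR() wrappers in <linux/posix_types.h>. A tiny usage sketch with an invented function name:

void example_fdset_usage(void)
{
	__kernel_fd_set set;

	__FD_ZERO(&set);		/* unrolled clear: 8 longs for 256 fds */
	__FD_SET(3, &set);		/* mark fd 3                           */
	if (__FD_ISSET(3, &set))
		__FD_CLR(3, &set);	/* and drop it again                   */
}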
128
129#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
130
131#endif /* __ASM_SH64_POSIX_TYPES_H */
diff --git a/include/asm-sh64/processor.h b/include/asm-sh64/processor.h
new file mode 100644
index 000000000000..a51bd41e6fbc
--- /dev/null
+++ b/include/asm-sh64/processor.h
@@ -0,0 +1,286 @@
1#ifndef __ASM_SH64_PROCESSOR_H
2#define __ASM_SH64_PROCESSOR_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/processor.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 * Copyright (C) 2004 Richard Curnow
14 *
15 */
16
17#include <asm/page.h>
18
19#ifndef __ASSEMBLY__
20
21#include <asm/types.h>
22#include <asm/cache.h>
23#include <asm/registers.h>
24#include <linux/threads.h>
25
26/*
27 * Default implementation of macro that returns current
28 * instruction pointer ("program counter").
29 */
30#define current_text_addr() ({ \
31void *pc; \
32unsigned long long __dummy = 0; \
33__asm__("gettr tr0, %1\n\t" \
34 "pta 4, tr0\n\t" \
35 "gettr tr0, %0\n\t" \
36 "ptabs %1, tr0\n\t" \
37 :"=r" (pc), "=r" (__dummy) \
38 : "1" (__dummy)); \
39pc; })
40
41/*
42 * CPU type and hardware bug flags. Kept separately for each CPU.
43 */
44enum cpu_type {
45 CPU_SH5_101,
46 CPU_SH5_103,
47 CPU_SH_NONE
48};
49
50/*
51 * TLB information structure
52 *
53 * Defined for both I and D tlb, per-processor.
54 */
55struct tlb_info {
56 unsigned long long next;
57 unsigned long long first;
58 unsigned long long last;
59
60 unsigned int entries;
61 unsigned int step;
62
63 unsigned long flags;
64};
65
66struct sh_cpuinfo {
67 enum cpu_type type;
68 unsigned long loops_per_jiffy;
69
70 char hard_math;
71
72 unsigned long *pgd_quick;
73 unsigned long *pmd_quick;
74 unsigned long *pte_quick;
75 unsigned long pgtable_cache_sz;
76 unsigned int cpu_clock, master_clock, bus_clock, module_clock;
77
78 /* Cache info */
79 struct cache_info icache;
80 struct cache_info dcache;
81
82 /* TLB info */
83 struct tlb_info itlb;
84 struct tlb_info dtlb;
85};
86
87extern struct sh_cpuinfo boot_cpu_data;
88
89#define cpu_data (&boot_cpu_data)
90#define current_cpu_data boot_cpu_data
91
92#endif
93
94/*
95 * User space process size: 2GB - 4k.
96 */
97#define TASK_SIZE 0x7ffff000UL
98
99/* This decides where the kernel will search for a free chunk of vm
100 * space during mmap's.
101 */
102#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
103
104/*
105 * Bit of SR register
106 *
107 * FD-bit:
108 * When it is set, the processor does not have the right to use the FPU,
109 * and an exception is raised when a floating-point operation is executed.
110 *
111 * IMASK-bit:
112 * Interrupt level mask
113 *
114 * STEP-bit:
115 * Single step bit
116 *
117 */
118#define SR_FD 0x00008000
119
120#if defined(CONFIG_SH64_SR_WATCH)
121#define SR_MMU 0x84000000
122#else
123#define SR_MMU 0x80000000
124#endif
125
126#define SR_IMASK 0x000000f0
127#define SR_SSTEP 0x08000000
128
129#ifndef __ASSEMBLY__
130
131/*
132 * FPU structure and data: require 8-byte alignment as we need to access it
133 * with fld.p, fst.p.
134 */
135
136struct sh_fpu_hard_struct {
137 unsigned long fp_regs[64];
138 unsigned int fpscr;
139 /* long status; * software status information */
140};
141
142#if 0
143/* Dummy fpu emulator */
144struct sh_fpu_soft_struct {
145 unsigned long long fp_regs[32];
146 unsigned int fpscr;
147 unsigned char lookahead;
148 unsigned long entry_pc;
149};
150#endif
151
152union sh_fpu_union {
153 struct sh_fpu_hard_struct hard;
154 /* 'hard' itself only produces 32 bit alignment, yet we need
155 to access it using 64 bit load/store as well. */
156 unsigned long long alignment_dummy;
157};
158
159struct thread_struct {
160 unsigned long sp;
161 unsigned long pc;
162 /* This stores the address of the pt_regs built during a context
163 switch, or of the register save area built for a kernel mode
164 exception. It is used for backtracing the stack of a sleeping task
165 or one that traps in kernel mode. */
166 struct pt_regs *kregs;
167 /* This stores the address of the pt_regs constructed on entry from
168 user mode. It is a fixed value over the lifetime of a process, or
169 NULL for a kernel thread. */
170 struct pt_regs *uregs;
171
172 unsigned long trap_no, error_code;
173 unsigned long address;
174 /* Hardware debugging registers may come here */
175
176 /* floating point info */
177 union sh_fpu_union fpu;
178};
179
180#define INIT_MMAP \
181{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
182
183extern struct pt_regs fake_swapper_regs;
184
185#define INIT_THREAD { \
186 .sp = sizeof(init_stack) + \
187 (long) &init_stack, \
188 .pc = 0, \
189 .kregs = &fake_swapper_regs, \
190 .uregs = NULL, \
191 .trap_no = 0, \
192 .error_code = 0, \
193 .address = 0, \
194 .fpu = { { { 0, } }, } \
195}
196
197/*
198 * Do necessary setup to start up a newly executed thread.
199 */
200#define SR_USER (SR_MMU | SR_FD)
201
202#define start_thread(regs, new_pc, new_sp) \
203 set_fs(USER_DS); \
204 regs->sr = SR_USER; /* User mode. */ \
205 regs->pc = new_pc - 4; /* Compensate syscall exit */ \
206 regs->pc |= 1; /* Set SHmedia ! */ \
207 regs->regs[18] = 0; \
208 regs->regs[15] = new_sp
209
210/* Forward declaration, a strange C thing */
211struct task_struct;
212struct mm_struct;
213
214/* Free all resources held by a thread. */
215extern void release_thread(struct task_struct *);
216/*
217 * create a kernel thread without removing it from tasklists
218 */
219extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
220
221
222/* Copy and release all segment info associated with a VM */
223#define copy_segments(p, mm) do { } while (0)
224#define release_segments(mm) do { } while (0)
225#define forget_segments() do { } while (0)
226#define prepare_to_copy(tsk) do { } while (0)
227/*
228 * FPU lazy state save handling.
229 */
230
231extern __inline__ void release_fpu(void)
232{
233 unsigned long long __dummy;
234
235 /* Set FD flag in SR */
236 __asm__ __volatile__("getcon " __SR ", %0\n\t"
237 "or %0, %1, %0\n\t"
238 "putcon %0, " __SR "\n\t"
239 : "=&r" (__dummy)
240 : "r" (SR_FD));
241}
242
243extern __inline__ void grab_fpu(void)
244{
245 unsigned long long __dummy;
246
247 /* Clear out FD flag in SR */
248 __asm__ __volatile__("getcon " __SR ", %0\n\t"
249 "and %0, %1, %0\n\t"
250 "putcon %0, " __SR "\n\t"
251 : "=&r" (__dummy)
252 : "r" (~SR_FD));
253}
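release_fpu()/grab_fpu() are the two halves of the lazy FPU policy implied by the SR.FD description above: FD stays set for a task that has not used the FPU, the first floating-point instruction traps, and only then is FD cleared and the register state switched. The following is only a hypothetical outline of that trap-side flow (the function name is invented; the real handler lives in the arch FPU code, not in this header):

void example_fpu_disable_trap(struct task_struct *tsk)
{
	grab_fpu();			/* clear SR.FD so FP instructions run */

	if (last_task_used_math && last_task_used_math != tsk)
		fpsave(&last_task_used_math->thread.fpu.hard);	/* spill old owner */

	fpinit(&tsk->thread.fpu.hard);	/* or reload tsk's saved FP state     */
	last_task_used_math = tsk;
}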
254
255/* Round to nearest, no exceptions on inexact, overflow, underflow,
256 zero-divide, invalid. Configure option for whether to flush denorms to
257 zero, or except if a denorm is encountered. */
258#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
259#define FPSCR_INIT 0x00040000
260#else
261#define FPSCR_INIT 0x00000000
262#endif
263
264/* Save the current FP regs */
265void fpsave(struct sh_fpu_hard_struct *fpregs);
266
267/* Initialise the FP state of a task */
268void fpinit(struct sh_fpu_hard_struct *fpregs);
269
270extern struct task_struct *last_task_used_math;
271
272/*
273 * Return saved PC of a blocked thread.
274 */
275#define thread_saved_pc(tsk) (tsk->thread.pc)
276
277extern unsigned long get_wchan(struct task_struct *p);
278
279#define KSTK_EIP(tsk) ((tsk)->thread.pc)
280#define KSTK_ESP(tsk) ((tsk)->thread.sp)
281
282#define cpu_relax() do { } while (0)
283
284#endif /* __ASSEMBLY__ */
285#endif /* __ASM_SH64_PROCESSOR_H */
286
diff --git a/include/asm-sh64/ptrace.h b/include/asm-sh64/ptrace.h
new file mode 100644
index 000000000000..56190f521587
--- /dev/null
+++ b/include/asm-sh64/ptrace.h
@@ -0,0 +1,37 @@
1#ifndef __ASM_SH64_PTRACE_H
2#define __ASM_SH64_PTRACE_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/ptrace.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15/*
16 * This struct defines the way the registers are stored on the
17 * kernel stack during a system call or other kernel entry.
18 */
19struct pt_regs {
20 unsigned long long pc;
21 unsigned long long sr;
22 unsigned long long syscall_nr;
23 unsigned long long regs[63];
24 unsigned long long tregs[8];
25 unsigned long long pad[2];
26};
27
28#ifdef __KERNEL__
29#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
30#define instruction_pointer(regs) ((regs)->pc)
31#define profile_pc(regs) instruction_pointer(regs)
32extern void show_regs(struct pt_regs *);
33#endif
34
35#define PTRACE_O_TRACESYSGOOD 0x00000001
36
37#endif /* __ASM_SH64_PTRACE_H */
diff --git a/include/asm-sh64/registers.h b/include/asm-sh64/registers.h
new file mode 100644
index 000000000000..7eec666acf84
--- /dev/null
+++ b/include/asm-sh64/registers.h
@@ -0,0 +1,106 @@
1#ifndef __ASM_SH64_REGISTERS_H
2#define __ASM_SH64_REGISTERS_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/registers.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2004 Richard Curnow
13 */
14
15#ifdef __ASSEMBLY__
16/* =====================================================================
17**
18** Section 1: acts on assembly sources pre-processed by GPP ( <source.S>).
19** Assigns symbolic names to control & target registers.
20*/
21
22/*
23 * Define some useful aliases for control registers.
24 */
25#define SR cr0
26#define SSR cr1
27#define PSSR cr2
28 /* cr3 UNDEFINED */
29#define INTEVT cr4
30#define EXPEVT cr5
31#define PEXPEVT cr6
32#define TRA cr7
33#define SPC cr8
34#define PSPC cr9
35#define RESVEC cr10
36#define VBR cr11
37 /* cr12 UNDEFINED */
38#define TEA cr13
39 /* cr14-cr15 UNDEFINED */
40#define DCR cr16
41#define KCR0 cr17
42#define KCR1 cr18
43 /* cr19-cr31 UNDEFINED */
44 /* cr32-cr61 RESERVED */
45#define CTC cr62
46#define USR cr63
47
48/*
49 * ABI dependent registers (general purpose set)
50 */
51#define RET r2
52#define ARG1 r2
53#define ARG2 r3
54#define ARG3 r4
55#define ARG4 r5
56#define ARG5 r6
57#define ARG6 r7
58#define SP r15
59#define LINK r18
60#define ZERO r63
61
62/*
63 * Status register defines: used only by assembly sources (and
64 * syntax independednt)
65 */
66#define SR_RESET_VAL 0x0000000050008000
67#define SR_HARMLESS 0x00000000500080f0 /* Write ignores for most */
68#define SR_ENABLE_FPU 0xffffffffffff7fff /* AND with this */
69
70#if defined (CONFIG_SH64_SR_WATCH)
71#define SR_ENABLE_MMU 0x0000000084000000 /* OR with this */
72#else
73#define SR_ENABLE_MMU 0x0000000080000000 /* OR with this */
74#endif
75
76#define SR_UNBLOCK_EXC 0xffffffffefffffff /* AND with this */
77#define SR_BLOCK_EXC 0x0000000010000000 /* OR with this */
78
79#else /* Not __ASSEMBLY__ syntax */
80
81/*
82** Stringify reg. name
83*/
84#define __str(x) #x
85
86/* Stringify control register names for use in inline assembly */
87#define __SR __str(SR)
88#define __SSR __str(SSR)
89#define __PSSR __str(PSSR)
90#define __INTEVT __str(INTEVT)
91#define __EXPEVT __str(EXPEVT)
92#define __PEXPEVT __str(PEXPEVT)
93#define __TRA __str(TRA)
94#define __SPC __str(SPC)
95#define __PSPC __str(PSPC)
96#define __RESVEC __str(RESVEC)
97#define __VBR __str(VBR)
98#define __TEA __str(TEA)
99#define __DCR __str(DCR)
100#define __KCR0 __str(KCR0)
101#define __KCR1 __str(KCR1)
102#define __CTC __str(CTC)
103#define __USR __str(USR)
104
105#endif /* __ASSEMBLY__ */
106#endif /* __ASM_SH64_REGISTERS_H */
diff --git a/include/asm-sh64/resource.h b/include/asm-sh64/resource.h
new file mode 100644
index 000000000000..8ff93944ae66
--- /dev/null
+++ b/include/asm-sh64/resource.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_RESOURCE_H
2#define __ASM_SH64_RESOURCE_H
3
4#include <asm-sh/resource.h>
5
6#endif /* __ASM_SH64_RESOURCE_H */
diff --git a/include/asm-sh64/scatterlist.h b/include/asm-sh64/scatterlist.h
new file mode 100644
index 000000000000..5d8fa32d2e9d
--- /dev/null
+++ b/include/asm-sh64/scatterlist.h
@@ -0,0 +1,23 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * include/asm-sh64/scatterlist.h
7 *
8 * Copyright (C) 2003 Paul Mundt
9 *
10 */
11#ifndef __ASM_SH64_SCATTERLIST_H
12#define __ASM_SH64_SCATTERLIST_H
13
14struct scatterlist {
15 struct page * page; /* Location for highmem page, if any */
16 unsigned int offset;/* for highmem, page offset */
17 dma_addr_t dma_address;
18 unsigned int length;
19};
20
21#define ISA_DMA_THRESHOLD (0xffffffff)
22
23#endif /* !__ASM_SH64_SCATTERLIST_H */
diff --git a/include/asm-sh64/sections.h b/include/asm-sh64/sections.h
new file mode 100644
index 000000000000..897f36bcdf85
--- /dev/null
+++ b/include/asm-sh64/sections.h
@@ -0,0 +1,7 @@
1#ifndef __ASM_SH64_SECTIONS_H
2#define __ASM_SH64_SECTIONS_H
3
4#include <asm-sh/sections.h>
5
6#endif /* __ASM_SH64_SECTIONS_H */
7
diff --git a/include/asm-sh64/segment.h b/include/asm-sh64/segment.h
new file mode 100644
index 000000000000..92ac001fc483
--- /dev/null
+++ b/include/asm-sh64/segment.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_SEGMENT_H
2#define _ASM_SEGMENT_H
3
4/* Only here because we have some old header files that expect it.. */
5
6#endif /* _ASM_SEGMENT_H */
diff --git a/include/asm-sh64/semaphore-helper.h b/include/asm-sh64/semaphore-helper.h
new file mode 100644
index 000000000000..fcfafe263e86
--- /dev/null
+++ b/include/asm-sh64/semaphore-helper.h
@@ -0,0 +1,101 @@
1#ifndef __ASM_SH64_SEMAPHORE_HELPER_H
2#define __ASM_SH64_SEMAPHORE_HELPER_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/semaphore-helper.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14#include <asm/errno.h>
15
16/*
17 * SMP- and interrupt-safe semaphores helper functions.
18 *
19 * (C) Copyright 1996 Linus Torvalds
20 * (C) Copyright 1999 Andrea Arcangeli
21 */
22
23/*
24 * These two _must_ execute atomically wrt each other.
25 *
26 * This is trivially done with load_locked/store_cond,
27 * which we have. Let the rest of the losers suck eggs.
28 */
29static __inline__ void wake_one_more(struct semaphore * sem)
30{
31 atomic_inc((atomic_t *)&sem->sleepers);
32}
33
34static __inline__ int waking_non_zero(struct semaphore *sem)
35{
36 unsigned long flags;
37 int ret = 0;
38
39 spin_lock_irqsave(&semaphore_wake_lock, flags);
40 if (sem->sleepers > 0) {
41 sem->sleepers--;
42 ret = 1;
43 }
44 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
45 return ret;
46}
47
48/*
49 * waking_non_zero_interruptible:
50 * 1 got the lock
51 * 0 go to sleep
52 * -EINTR interrupted
53 *
54 * We must undo the sem->count down_interruptible() increment while we are
55 * protected by the spinlock, in order to make this atomic_inc() atomic with
56 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
57 */
58static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
59 struct task_struct *tsk)
60{
61 unsigned long flags;
62 int ret = 0;
63
64 spin_lock_irqsave(&semaphore_wake_lock, flags);
65 if (sem->sleepers > 0) {
66 sem->sleepers--;
67 ret = 1;
68 } else if (signal_pending(tsk)) {
69 atomic_inc(&sem->count);
70 ret = -EINTR;
71 }
72 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
73 return ret;
74}
75
76/*
77 * waking_non_zero_trylock:
78 * 1 failed to lock
79 * 0 got the lock
80 *
81 * We must undo the sem->count down_trylock() increment while we are
82 * protected by the spinlock, in order to make this atomic_inc() atomic with
83 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
84 */
85static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
86{
87 unsigned long flags;
88 int ret = 1;
89
90 spin_lock_irqsave(&semaphore_wake_lock, flags);
91 if (sem->sleepers <= 0)
92 atomic_inc(&sem->count);
93 else {
94 sem->sleepers--;
95 ret = 0;
96 }
97 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
98 return ret;
99}
100
101#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */
diff --git a/include/asm-sh64/semaphore.h b/include/asm-sh64/semaphore.h
new file mode 100644
index 000000000000..fce22bb9a546
--- /dev/null
+++ b/include/asm-sh64/semaphore.h
@@ -0,0 +1,123 @@
1#ifndef __ASM_SH64_SEMAPHORE_H
2#define __ASM_SH64_SEMAPHORE_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/semaphore.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 * SMP- and interrupt-safe semaphores.
14 *
15 * (C) Copyright 1996 Linus Torvalds
16 *
17 * SuperH version by Niibe Yutaka
18 * (Currently no asm implementation but generic C code...)
19 *
20 */
21
22#include <linux/linkage.h>
23#include <linux/spinlock.h>
24#include <linux/wait.h>
25#include <linux/rwsem.h>
26
27#include <asm/system.h>
28#include <asm/atomic.h>
29
30struct semaphore {
31 atomic_t count;
32 int sleepers;
33 wait_queue_head_t wait;
34};
35
36#define __SEMAPHORE_INITIALIZER(name, n) \
37{ \
38 .count = ATOMIC_INIT(n), \
39 .sleepers = 0, \
40 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
41}
42
43#define __MUTEX_INITIALIZER(name) \
44 __SEMAPHORE_INITIALIZER(name,1)
45
46#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
47 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
48
49#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
50#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
51
52static inline void sema_init (struct semaphore *sem, int val)
53{
54/*
55 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
56 *
57 * I'd rather use the more flexible initialization above, but sadly
58 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
59 */
60 atomic_set(&sem->count, val);
61 sem->sleepers = 0;
62 init_waitqueue_head(&sem->wait);
63}
64
65static inline void init_MUTEX (struct semaphore *sem)
66{
67 sema_init(sem, 1);
68}
69
70static inline void init_MUTEX_LOCKED (struct semaphore *sem)
71{
72 sema_init(sem, 0);
73}
74
75#if 0
76asmlinkage void __down_failed(void /* special register calling convention */);
77asmlinkage int __down_failed_interruptible(void /* params in registers */);
78asmlinkage int __down_failed_trylock(void /* params in registers */);
79asmlinkage void __up_wakeup(void /* special register calling convention */);
80#endif
81
82asmlinkage void __down(struct semaphore * sem);
83asmlinkage int __down_interruptible(struct semaphore * sem);
84asmlinkage int __down_trylock(struct semaphore * sem);
85asmlinkage void __up(struct semaphore * sem);
86
87extern spinlock_t semaphore_wake_lock;
88
89static inline void down(struct semaphore * sem)
90{
91 if (atomic_dec_return(&sem->count) < 0)
92 __down(sem);
93}
94
95static inline int down_interruptible(struct semaphore * sem)
96{
97 int ret = 0;
98
99 if (atomic_dec_return(&sem->count) < 0)
100 ret = __down_interruptible(sem);
101 return ret;
102}
103
104static inline int down_trylock(struct semaphore * sem)
105{
106 int ret = 0;
107
108 if (atomic_dec_return(&sem->count) < 0)
109 ret = __down_trylock(sem);
110 return ret;
111}
112
113/*
114 * Note! This is subtle. We jump to wake people up only if
115 * the semaphore was negative (== somebody was waiting on it).
116 */
117static inline void up(struct semaphore * sem)
118{
119 if (atomic_inc_return(&sem->count) <= 0)
120 __up(sem);
121}
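The sign convention ties these together: the count is the number of free slots, and a negative value means tasks are blocked, which is why up() only takes the slow __up() path when the incremented count is still <= 0. Typical usage is the classic pair, sketched here with an invented lock name:

static DECLARE_MUTEX(example_lock);		/* count starts at 1 */

static void example_critical_section(void)
{
	down(&example_lock);			/* may sleep if already held          */
	/* ... access the data protected by example_lock ... */
	up(&example_lock);			/* wakes a waiter if any were blocked */
}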
122
123#endif /* __ASM_SH64_SEMAPHORE_H */
diff --git a/include/asm-sh64/sembuf.h b/include/asm-sh64/sembuf.h
new file mode 100644
index 000000000000..ec4d9f143577
--- /dev/null
+++ b/include/asm-sh64/sembuf.h
@@ -0,0 +1,36 @@
1#ifndef __ASM_SH64_SEMBUF_H
2#define __ASM_SH64_SEMBUF_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/sembuf.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15/*
16 * The semid64_ds structure for the sh64 architecture.
17 * Note extra padding because this structure is passed back and forth
18 * between kernel and user space.
19 *
20 * Pad space is left for:
21 * - 64-bit time_t to solve y2038 problem
22 * - 2 miscellaneous 32-bit values
23 */
24
25struct semid64_ds {
26 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
27 __kernel_time_t sem_otime; /* last semop time */
28 unsigned long __unused1;
29 __kernel_time_t sem_ctime; /* last change time */
30 unsigned long __unused2;
31 unsigned long sem_nsems; /* no. of semaphores in array */
32 unsigned long __unused3;
33 unsigned long __unused4;
34};
35
36#endif /* __ASM_SH64_SEMBUF_H */
diff --git a/include/asm-sh64/serial.h b/include/asm-sh64/serial.h
new file mode 100644
index 000000000000..8e39b4e90c76
--- /dev/null
+++ b/include/asm-sh64/serial.h
@@ -0,0 +1,33 @@
1/*
2 * include/asm-sh64/serial.h
3 *
4 * Configuration details for 8250, 16450, 16550, etc. serial ports
5 */
6
7#ifndef _ASM_SERIAL_H
8#define _ASM_SERIAL_H
9
10/*
11 * This assumes you have a 1.8432 MHz clock for your UART.
12 *
13 * It'd be nice if someone built a serial card with a 24.576 MHz
14 * clock, since the 16550A is capable of handling a top speed of 1.5
15 * megabits/second; but this requires the faster clock.
16 */
17#define BASE_BAUD ( 1843200 / 16 )
18
19#define RS_TABLE_SIZE 2
20
21#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
22
23#define STD_SERIAL_PORT_DEFNS \
24 /* UART CLK PORT IRQ FLAGS */ \
25 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
26 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS } /* ttyS1 */
27
28#define SERIAL_PORT_DFNS STD_SERIAL_PORT_DEFNS
29
30/* XXX: This should be moved into irq.h */
31#define irq_cannonicalize(x) (x)
32
33#endif /* _ASM_SERIAL_H */
diff --git a/include/asm-sh64/setup.h b/include/asm-sh64/setup.h
new file mode 100644
index 000000000000..ebd42eb1b709
--- /dev/null
+++ b/include/asm-sh64/setup.h
@@ -0,0 +1,16 @@
1#ifndef __ASM_SH64_SETUP_H
2#define __ASM_SH64_SETUP_H
3
4#define PARAM ((unsigned char *)empty_zero_page)
5#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
6#define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
7#define ORIG_ROOT_DEV (*(unsigned long *) (PARAM+0x008))
8#define LOADER_TYPE (*(unsigned long *) (PARAM+0x00c))
9#define INITRD_START (*(unsigned long *) (PARAM+0x010))
10#define INITRD_SIZE (*(unsigned long *) (PARAM+0x014))
11
12#define COMMAND_LINE ((char *) (PARAM+256))
13#define COMMAND_LINE_SIZE 256
14
15#endif /* __ASM_SH64_SETUP_H */
16
diff --git a/include/asm-sh64/shmbuf.h b/include/asm-sh64/shmbuf.h
new file mode 100644
index 000000000000..022f3494dd64
--- /dev/null
+++ b/include/asm-sh64/shmbuf.h
@@ -0,0 +1,53 @@
1#ifndef __ASM_SH64_SHMBUF_H
2#define __ASM_SH64_SHMBUF_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/shmbuf.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15/*
16 * The shmid64_ds structure for the sh64 architecture.
17 * Note extra padding because this structure is passed back and forth
18 * between kernel and user space.
19 *
20 * Pad space is left for:
21 * - 64-bit time_t to solve y2038 problem
22 * - 2 miscellaneous 32-bit values
23 */
24
25struct shmid64_ds {
26 struct ipc64_perm shm_perm; /* operation perms */
27 size_t shm_segsz; /* size of segment (bytes) */
28 __kernel_time_t shm_atime; /* last attach time */
29 unsigned long __unused1;
30 __kernel_time_t shm_dtime; /* last detach time */
31 unsigned long __unused2;
32 __kernel_time_t shm_ctime; /* last change time */
33 unsigned long __unused3;
34 __kernel_pid_t shm_cpid; /* pid of creator */
35 __kernel_pid_t shm_lpid; /* pid of last operator */
36 unsigned long shm_nattch; /* no. of current attaches */
37 unsigned long __unused4;
38 unsigned long __unused5;
39};
40
41struct shminfo64 {
42 unsigned long shmmax;
43 unsigned long shmmin;
44 unsigned long shmmni;
45 unsigned long shmseg;
46 unsigned long shmall;
47 unsigned long __unused1;
48 unsigned long __unused2;
49 unsigned long __unused3;
50 unsigned long __unused4;
51};
52
53#endif /* __ASM_SH64_SHMBUF_H */
diff --git a/include/asm-sh64/shmparam.h b/include/asm-sh64/shmparam.h
new file mode 100644
index 000000000000..d3a99a4dc0e3
--- /dev/null
+++ b/include/asm-sh64/shmparam.h
@@ -0,0 +1,20 @@
1#ifndef __ASM_SH64_SHMPARAM_H
2#define __ASM_SH64_SHMPARAM_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/shmparam.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#include <asm/cache.h>
16
17/* attach addr a multiple of this */
18#define SHMLBA (cpu_data->dcache.sets * L1_CACHE_BYTES)
19
20#endif /* __ASM_SH64_SHMPARAM_H */
diff --git a/include/asm-sh64/sigcontext.h b/include/asm-sh64/sigcontext.h
new file mode 100644
index 000000000000..6293509d8cc1
--- /dev/null
+++ b/include/asm-sh64/sigcontext.h
@@ -0,0 +1,30 @@
1#ifndef __ASM_SH64_SIGCONTEXT_H
2#define __ASM_SH64_SIGCONTEXT_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/sigcontext.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15struct sigcontext {
16 unsigned long oldmask;
17
18 /* CPU registers */
19 unsigned long long sc_regs[63];
20 unsigned long long sc_tregs[8];
21 unsigned long long sc_pc;
22 unsigned long long sc_sr;
23
24 /* FPU registers */
25 unsigned long long sc_fpregs[32];
26 unsigned int sc_fpscr;
27 unsigned int sc_fpvalid;
28};
29
30#endif /* __ASM_SH64_SIGCONTEXT_H */
diff --git a/include/asm-sh64/siginfo.h b/include/asm-sh64/siginfo.h
new file mode 100644
index 000000000000..56ef1da534d7
--- /dev/null
+++ b/include/asm-sh64/siginfo.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_SIGINFO_H
2#define __ASM_SH64_SIGINFO_H
3
4#include <asm-generic/siginfo.h>
5
6#endif /* __ASM_SH64_SIGINFO_H */
diff --git a/include/asm-sh64/signal.h b/include/asm-sh64/signal.h
new file mode 100644
index 000000000000..77957e9b92d9
--- /dev/null
+++ b/include/asm-sh64/signal.h
@@ -0,0 +1,185 @@
1#ifndef __ASM_SH64_SIGNAL_H
2#define __ASM_SH64_SIGNAL_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/signal.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#include <linux/types.h>
16#include <asm/processor.h>
17
18/* Avoid too many header ordering problems. */
19struct siginfo;
20
21#define _NSIG 64
22#define _NSIG_BPW 32
23#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
24
25typedef unsigned long old_sigset_t; /* at least 32 bits */
26
27typedef struct {
28 unsigned long sig[_NSIG_WORDS];
29} sigset_t;
30
31#define SIGHUP 1
32#define SIGINT 2
33#define SIGQUIT 3
34#define SIGILL 4
35#define SIGTRAP 5
36#define SIGABRT 6
37#define SIGIOT 6
38#define SIGBUS 7
39#define SIGFPE 8
40#define SIGKILL 9
41#define SIGUSR1 10
42#define SIGSEGV 11
43#define SIGUSR2 12
44#define SIGPIPE 13
45#define SIGALRM 14
46#define SIGTERM 15
47#define SIGSTKFLT 16
48#define SIGCHLD 17
49#define SIGCONT 18
50#define SIGSTOP 19
51#define SIGTSTP 20
52#define SIGTTIN 21
53#define SIGTTOU 22
54#define SIGURG 23
55#define SIGXCPU 24
56#define SIGXFSZ 25
57#define SIGVTALRM 26
58#define SIGPROF 27
59#define SIGWINCH 28
60#define SIGIO 29
61#define SIGPOLL SIGIO
62/*
63#define SIGLOST 29
64*/
65#define SIGPWR 30
66#define SIGSYS 31
67#define SIGUNUSED 31
68
69/* These should not be considered constants from userland. */
70#define SIGRTMIN 32
71#define SIGRTMAX (_NSIG-1)
72
73/*
74 * SA_FLAGS values:
75 *
76 * SA_ONSTACK indicates that a registered stack_t will be used.
77 * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
78 * SA_RESTART flag to get restarting signals (which were the default long ago)
79 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
80 * SA_RESETHAND clears the handler when the signal is delivered.
81 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
82 * SA_NODEFER prevents the current signal from being masked in the handler.
83 *
84 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
85 * Unix names RESETHAND and NODEFER respectively.
86 */
87#define SA_NOCLDSTOP 0x00000001
88#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
89#define SA_SIGINFO 0x00000004
90#define SA_ONSTACK 0x08000000
91#define SA_RESTART 0x10000000
92#define SA_NODEFER 0x40000000
93#define SA_RESETHAND 0x80000000
94
95#define SA_NOMASK SA_NODEFER
96#define SA_ONESHOT SA_RESETHAND
97#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
98
99#define SA_RESTORER 0x04000000
100
101/*
102 * sigaltstack controls
103 */
104#define SS_ONSTACK 1
105#define SS_DISABLE 2
106
107#define MINSIGSTKSZ 2048
108#define SIGSTKSZ THREAD_SIZE
109
110#ifdef __KERNEL__
111
112/*
113 * These values of sa_flags are used only by the kernel as part of the
114 * irq handling routines.
115 *
116 * SA_INTERRUPT is also used by the irq handling routines.
117 * SA_SHIRQ is for shared interrupt support on PCI and EISA.
118 */
119#define SA_PROBE SA_ONESHOT
120#define SA_SAMPLE_RANDOM SA_RESTART
121#define SA_SHIRQ 0x04000000
122#endif
123
124#define SIG_BLOCK 0 /* for blocking signals */
125#define SIG_UNBLOCK 1 /* for unblocking signals */
126#define SIG_SETMASK 2 /* for setting the signal mask */
127
128/* Type of a signal handler. */
129typedef void (*__sighandler_t)(int);
130
131#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
132#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
133#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
134
135#ifdef __KERNEL__
136struct old_sigaction {
137 __sighandler_t sa_handler;
138 old_sigset_t sa_mask;
139 unsigned long sa_flags;
140 void (*sa_restorer)(void);
141};
142
143struct sigaction {
144 __sighandler_t sa_handler;
145 unsigned long sa_flags;
146 void (*sa_restorer)(void);
147 sigset_t sa_mask; /* mask last for extensibility */
148};
149
150struct k_sigaction {
151 struct sigaction sa;
152};
153#else
154/* Here we must cater to libcs that poke about in kernel headers. */
155
156struct sigaction {
157 union {
158 __sighandler_t _sa_handler;
159 void (*_sa_sigaction)(int, struct siginfo *, void *);
160 } _u;
161 sigset_t sa_mask;
162 unsigned long sa_flags;
163 void (*sa_restorer)(void);
164};
165
166#define sa_handler _u._sa_handler
167#define sa_sigaction _u._sa_sigaction
168
169#endif /* __KERNEL__ */
170
171typedef struct sigaltstack {
172 void *ss_sp;
173 int ss_flags;
174 size_t ss_size;
175} stack_t;
176
177#ifdef __KERNEL__
178#include <asm/sigcontext.h>
179
180#define sigmask(sig) (1UL << ((sig) - 1))
181#define ptrace_signal_deliver(regs, cookie) do { } while (0)
182
183#endif /* __KERNEL__ */
184
185#endif /* __ASM_SH64_SIGNAL_H */
diff --git a/include/asm-sh64/smp.h b/include/asm-sh64/smp.h
new file mode 100644
index 000000000000..4a4d0da39a84
--- /dev/null
+++ b/include/asm-sh64/smp.h
@@ -0,0 +1,15 @@
1#ifndef __ASM_SH64_SMP_H
2#define __ASM_SH64_SMP_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/smp.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#endif /* __ASM_SH64_SMP_H */
diff --git a/include/asm-sh64/socket.h b/include/asm-sh64/socket.h
new file mode 100644
index 000000000000..1853f7246ab0
--- /dev/null
+++ b/include/asm-sh64/socket.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_SOCKET_H
2#define __ASM_SH64_SOCKET_H
3
4#include <asm-sh/socket.h>
5
6#endif /* __ASM_SH64_SOCKET_H */
diff --git a/include/asm-sh64/sockios.h b/include/asm-sh64/sockios.h
new file mode 100644
index 000000000000..1ae23ae82977
--- /dev/null
+++ b/include/asm-sh64/sockios.h
@@ -0,0 +1,24 @@
1#ifndef __ASM_SH64_SOCKIOS_H
2#define __ASM_SH64_SOCKIOS_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/sockios.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15/* Socket-level I/O control calls. */
16#define FIOGETOWN _IOR('f', 123, int)
17#define FIOSETOWN _IOW('f', 124, int)
18
19#define SIOCATMARK _IOR('s', 7, int)
20#define SIOCSPGRP _IOW('s', 8, pid_t)
21#define SIOCGPGRP _IOR('s', 9, pid_t)
22
23#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp - linux-specific */
24#endif /* __ASM_SH64_SOCKIOS_H */
diff --git a/include/asm-sh64/spinlock.h b/include/asm-sh64/spinlock.h
new file mode 100644
index 000000000000..296b0c9b24a2
--- /dev/null
+++ b/include/asm-sh64/spinlock.h
@@ -0,0 +1,17 @@
1#ifndef __ASM_SH64_SPINLOCK_H
2#define __ASM_SH64_SPINLOCK_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/spinlock.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#error "No SMP on SH64"
16
17#endif /* __ASM_SH64_SPINLOCK_H */
diff --git a/include/asm-sh64/stat.h b/include/asm-sh64/stat.h
new file mode 100644
index 000000000000..86f551b1987e
--- /dev/null
+++ b/include/asm-sh64/stat.h
@@ -0,0 +1,88 @@
1#ifndef __ASM_SH64_STAT_H
2#define __ASM_SH64_STAT_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/stat.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15struct __old_kernel_stat {
16 unsigned short st_dev;
17 unsigned short st_ino;
18 unsigned short st_mode;
19 unsigned short st_nlink;
20 unsigned short st_uid;
21 unsigned short st_gid;
22 unsigned short st_rdev;
23 unsigned long st_size;
24 unsigned long st_atime;
25 unsigned long st_mtime;
26 unsigned long st_ctime;
27};
28
29struct stat {
30 unsigned short st_dev;
31 unsigned short __pad1;
32 unsigned long st_ino;
33 unsigned short st_mode;
34 unsigned short st_nlink;
35 unsigned short st_uid;
36 unsigned short st_gid;
37 unsigned short st_rdev;
38 unsigned short __pad2;
39 unsigned long st_size;
40 unsigned long st_blksize;
41 unsigned long st_blocks;
42 unsigned long st_atime;
43 unsigned long st_atime_nsec;
44 unsigned long st_mtime;
45 unsigned long st_mtime_nsec;
46 unsigned long st_ctime;
47 unsigned long st_ctime_nsec;
48 unsigned long __unused4;
49 unsigned long __unused5;
50};
51
52/* This matches struct stat64 in glibc2.1, hence the absolutely
53 * insane amounts of padding around dev_t's.
54 */
55struct stat64 {
56 unsigned short st_dev;
57 unsigned char __pad0[10];
58
59 unsigned long st_ino;
60 unsigned int st_mode;
61 unsigned int st_nlink;
62
63 unsigned long st_uid;
64 unsigned long st_gid;
65
66 unsigned short st_rdev;
67 unsigned char __pad3[10];
68
69 long long st_size;
70 unsigned long st_blksize;
71
72 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
73 unsigned long __pad4; /* future possible st_blocks high bits */
74
75 unsigned long st_atime;
76 unsigned long st_atime_nsec;
77
78 unsigned long st_mtime;
79 unsigned long st_mtime_nsec;
80
81 unsigned long st_ctime;
82 unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
83
84 unsigned long __unused1;
85 unsigned long __unused2;
86};
87
88#endif /* __ASM_SH64_STAT_H */
diff --git a/include/asm-sh64/statfs.h b/include/asm-sh64/statfs.h
new file mode 100644
index 000000000000..083fd79b2417
--- /dev/null
+++ b/include/asm-sh64/statfs.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_STATFS_H
2#define __ASM_SH64_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif /* __ASM_SH64_STATFS_H */
diff --git a/include/asm-sh64/string.h b/include/asm-sh64/string.h
new file mode 100644
index 000000000000..8a7357366ce8
--- /dev/null
+++ b/include/asm-sh64/string.h
@@ -0,0 +1,21 @@
1#ifndef __ASM_SH64_STRING_H
2#define __ASM_SH64_STRING_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/string.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 * Empty on purpose. ARCH SH64 ASM libs are out of the current project scope.
14 *
15 */
16
17#define __HAVE_ARCH_MEMCPY
18
19extern void *memcpy(void *dest, const void *src, size_t count);
20
21#endif
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
new file mode 100644
index 000000000000..42510e496eb5
--- /dev/null
+++ b/include/asm-sh64/system.h
@@ -0,0 +1,195 @@
1#ifndef __ASM_SH64_SYSTEM_H
2#define __ASM_SH64_SYSTEM_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/system.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 * Copyright (C) 2004 Richard Curnow
14 *
15 */
16
17#include <linux/config.h>
18#include <asm/registers.h>
19#include <asm/processor.h>
20
21/*
22 * switch_to() switches execution from task 'prev' to task 'next'.
23 */
24
25typedef struct {
26 unsigned long seg;
27} mm_segment_t;
28
29extern struct task_struct *sh64_switch_to(struct task_struct *prev,
30 struct thread_struct *prev_thread,
31 struct task_struct *next,
32 struct thread_struct *next_thread);
33
34#define switch_to(prev,next,last) \
35 do {\
36 if (last_task_used_math != next) {\
37 struct pt_regs *regs = next->thread.uregs;\
38 if (regs) regs->sr |= SR_FD;\
39 }\
40 last = sh64_switch_to(prev, &prev->thread, next, &next->thread);\
41 } while(0)
42
43#define nop() __asm__ __volatile__ ("nop")
44
45#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
46
47#define tas(ptr) (xchg((ptr), 1))
48
49extern void __xchg_called_with_bad_pointer(void);
50
51#define mb() __asm__ __volatile__ ("synco": : :"memory")
52#define rmb() mb()
53#define wmb() __asm__ __volatile__ ("synco": : :"memory")
54#define read_barrier_depends() do { } while (0)
55
56#ifdef CONFIG_SMP
57#define smp_mb() mb()
58#define smp_rmb() rmb()
59#define smp_wmb() wmb()
60#define smp_read_barrier_depends() read_barrier_depends()
61#else
62#define smp_mb() barrier()
63#define smp_rmb() barrier()
64#define smp_wmb() barrier()
65#define smp_read_barrier_depends() do { } while (0)
66#endif /* CONFIG_SMP */
67
68#define set_rmb(var, value) do { xchg(&var, value); } while (0)
69#define set_mb(var, value) set_rmb(var, value)
70#define set_wmb(var, value) do { var = value; wmb(); } while (0)
71
72/* Interrupt Control */
73#ifndef HARD_CLI
74#define SR_MASK_L 0x000000f0L
75#define SR_MASK_LL 0x00000000000000f0LL
76#else
77#define SR_MASK_L 0x10000000L
78#define SR_MASK_LL 0x0000000010000000LL
79#endif
80
81static __inline__ void local_irq_enable(void)
82{
83 /* cli/sti based on SR.BL */
84 unsigned long long __dummy0, __dummy1=~SR_MASK_LL;
85
86 __asm__ __volatile__("getcon " __SR ", %0\n\t"
87 "and %0, %1, %0\n\t"
88 "putcon %0, " __SR "\n\t"
89 : "=&r" (__dummy0)
90 : "r" (__dummy1));
91}
92
93static __inline__ void local_irq_disable(void)
94{
95 /* cli/sti based on SR.BL */
96 unsigned long long __dummy0, __dummy1=SR_MASK_LL;
97 __asm__ __volatile__("getcon " __SR ", %0\n\t"
98 "or %0, %1, %0\n\t"
99 "putcon %0, " __SR "\n\t"
100 : "=&r" (__dummy0)
101 : "r" (__dummy1));
102}
103
104#define local_save_flags(x) \
105(__extension__ ({ unsigned long long __dummy=SR_MASK_LL; \
106 __asm__ __volatile__( \
107 "getcon " __SR ", %0\n\t" \
108 "and %0, %1, %0" \
109 : "=&r" (x) \
110 : "r" (__dummy));}))
111
112#define local_irq_save(x) \
113(__extension__ ({ unsigned long long __d2=SR_MASK_LL, __d1; \
114 __asm__ __volatile__( \
115 "getcon " __SR ", %1\n\t" \
116 "or %1, r63, %0\n\t" \
117 "or %1, %2, %1\n\t" \
118 "putcon %1, " __SR "\n\t" \
119 "and %0, %2, %0" \
120 : "=&r" (x), "=&r" (__d1) \
121 : "r" (__d2));}));
122
123#define local_irq_restore(x) do { \
124 if ( ((x) & SR_MASK_L) == 0 ) /* dropping to 0 ? */ \
125 local_irq_enable(); /* yes...re-enable */ \
126} while (0)
127
128#define irqs_disabled() \
129({ \
130 unsigned long flags; \
131 local_save_flags(flags); \
132 (flags != 0); \
133})
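local_irq_save() stores only the masked SR bit(s) in x, and local_irq_restore() re-enables interrupts only when that saved value is zero, i.e. when they were enabled on entry, so the pair nests safely. A short usage sketch (names invented), mirroring what xchg_u32() below does:

static unsigned long example_counter;

static void example_increment_atomically(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* flags <- previous SR mask bit(s) */
	example_counter++;		/* no interrupt can intervene here  */
	local_irq_restore(flags);	/* re-enable only if enabled before */
}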
134
135extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
136{
137 unsigned long flags, retval;
138
139 local_irq_save(flags);
140 retval = *m;
141 *m = val;
142 local_irq_restore(flags);
143 return retval;
144}
145
146extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
147{
148 unsigned long flags, retval;
149
150 local_irq_save(flags);
151 retval = *m;
152 *m = val & 0xff;
153 local_irq_restore(flags);
154 return retval;
155}
156
157static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
158{
159 switch (size) {
160 case 4:
161 return xchg_u32(ptr, x);
162 break;
163 case 1:
164 return xchg_u8(ptr, x);
165 break;
166 }
167 __xchg_called_with_bad_pointer();
168 return x;
169}
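Because this port is uniprocessor-only (spinlock.h simply #errors out), the xchg() primitives above can get away with disabling interrupts around a plain load/store; the size switch dispatches to the 32-bit or 8-bit helper, and any other operand size leaves a call to the never-defined __xchg_called_with_bad_pointer() so the build fails at link time. A small usage sketch with invented names:

static volatile int example_flag;

static int example_test_and_set(void)
{
	/* returns the previous value and stores 1; same as tas(&example_flag) */
	return xchg(&example_flag, 1);
}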
170
171/* XXX
172 * disable hlt during certain critical i/o operations
173 */
174#define HAVE_DISABLE_HLT
175void disable_hlt(void);
176void enable_hlt(void);
177
178
179#define smp_mb() barrier()
180#define smp_rmb() barrier()
181#define smp_wmb() barrier()
182
183#ifdef CONFIG_SH_ALPHANUMERIC
184/* This is only used for debugging. */
185extern void print_seg(char *file,int line);
186#define PLS() print_seg(__FILE__,__LINE__)
187#else /* CONFIG_SH_ALPHANUMERIC */
188#define PLS()
189#endif /* CONFIG_SH_ALPHANUMERIC */
190
191#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__)
192
193#define arch_align_stack(x) (x)
194
195#endif /* __ASM_SH64_SYSTEM_H */
diff --git a/include/asm-sh64/termbits.h b/include/asm-sh64/termbits.h
new file mode 100644
index 000000000000..86bde5ec1414
--- /dev/null
+++ b/include/asm-sh64/termbits.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_TERMBITS_H
2#define __ASM_SH64_TERMBITS_H
3
4#include <asm-sh/termbits.h>
5
6#endif /* __ASM_SH64_TERMBITS_H */
diff --git a/include/asm-sh64/termios.h b/include/asm-sh64/termios.h
new file mode 100644
index 000000000000..4a9c7fb411bc
--- /dev/null
+++ b/include/asm-sh64/termios.h
@@ -0,0 +1,117 @@
1#ifndef __ASM_SH64_TERMIOS_H
2#define __ASM_SH64_TERMIOS_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/termios.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#include <asm/termbits.h>
16#include <asm/ioctls.h>
17
18struct winsize {
19 unsigned short ws_row;
20 unsigned short ws_col;
21 unsigned short ws_xpixel;
22 unsigned short ws_ypixel;
23};
24
25#define NCC 8
26struct termio {
27 unsigned short c_iflag; /* input mode flags */
28 unsigned short c_oflag; /* output mode flags */
29 unsigned short c_cflag; /* control mode flags */
30 unsigned short c_lflag; /* local mode flags */
31 unsigned char c_line; /* line discipline */
32 unsigned char c_cc[NCC]; /* control characters */
33};
34
35/* modem lines */
36#define TIOCM_LE 0x001
37#define TIOCM_DTR 0x002
38#define TIOCM_RTS 0x004
39#define TIOCM_ST 0x008
40#define TIOCM_SR 0x010
41#define TIOCM_CTS 0x020
42#define TIOCM_CAR 0x040
43#define TIOCM_RNG 0x080
44#define TIOCM_DSR 0x100
45#define TIOCM_CD TIOCM_CAR
46#define TIOCM_RI TIOCM_RNG
47#define TIOCM_OUT1 0x2000
48#define TIOCM_OUT2 0x4000
49#define TIOCM_LOOP 0x8000
50
51/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
52
53/* line disciplines */
54#define N_TTY 0
55#define N_SLIP 1
56#define N_MOUSE 2
57#define N_PPP 3
58#define N_STRIP 4
59#define N_AX25 5
60#define N_X25 6 /* X.25 async */
61#define N_6PACK 7
62#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
63#define N_R3964 9 /* Reserved for Simatic R3964 module */
64#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
65#define N_IRDA 11 /* Linux IR - http://www.cs.uit.no/~dagb/irda/irda.html */
66#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
67#define N_HDLC 13 /* synchronous HDLC */
68#define N_SYNC_PPP 14
69#define N_HCI 15 /* Bluetooth HCI UART */
70
71#ifdef __KERNEL__
72
73/* intr=^C quit=^\ erase=del kill=^U
74 eof=^D vtime=\0 vmin=\1 sxtc=\0
75 start=^Q stop=^S susp=^Z eol=\0
76 reprint=^R discard=^U werase=^W lnext=^V
77 eol2=\0
78*/
79#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
80
81/*
82 * Translate a "termio" structure into a "termios". Ugh.
83 */
84#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
85 unsigned short __tmp; \
86 get_user(__tmp,&(termio)->x); \
87 *(unsigned short *) &(termios)->x = __tmp; \
88}
89
90#define user_termio_to_kernel_termios(termios, termio) \
91({ \
92 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
93 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
94 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
95 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
96 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
97})
98
99/*
100 * Translate a "termios" structure into a "termio". Ugh.
101 */
102#define kernel_termios_to_user_termio(termio, termios) \
103({ \
104 put_user((termios)->c_iflag, &(termio)->c_iflag); \
105 put_user((termios)->c_oflag, &(termio)->c_oflag); \
106 put_user((termios)->c_cflag, &(termio)->c_cflag); \
107 put_user((termios)->c_lflag, &(termio)->c_lflag); \
108 put_user((termios)->c_line, &(termio)->c_line); \
109 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
110})
111
112#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
113#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
114
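A brief usage sketch of the conversion helpers above, in the style of the real callers in the tty layer; the helper name is hypothetical and assumes <linux/errno.h> semantics are available.

/* Hypothetical helper: pull a user-supplied "struct termio" into a kernel
 * "struct termios", failing cleanly if the user pointer is bad. */
static inline int example_set_termio(struct termios *kterm,
				     struct termio __user *uterm)
{
	if (user_termio_to_kernel_termios(kterm, uterm))
		return -EFAULT;	/* copy_from_user() left bytes uncopied */
	return 0;
}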
115#endif /* __KERNEL__ */
116
117#endif /* __ASM_SH64_TERMIOS_H */
diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h
new file mode 100644
index 000000000000..e65f394da472
--- /dev/null
+++ b/include/asm-sh64/thread_info.h
@@ -0,0 +1,87 @@
1#ifndef __ASM_SH64_THREAD_INFO_H
2#define __ASM_SH64_THREAD_INFO_H
3
4/*
5 * SuperH 5 version
6 * Copyright (C) 2003 Paul Mundt
7 */
8
9#ifdef __KERNEL__
10
11#ifndef __ASSEMBLY__
12#include <asm/registers.h>
13
14/*
15 * low level task data that entry.S needs immediate access to
16 * - this struct should fit entirely inside of one cache line
17 * - this struct shares the supervisor stack pages
18 * - if the contents of this structure are changed, the assembly constants must also be changed
19 */
20struct thread_info {
21 struct task_struct *task; /* main task structure */
22 struct exec_domain *exec_domain; /* execution domain */
23 unsigned long flags; /* low level flags */
24 /* Put the 4 32-bit fields together to make asm offsetting easier. */
25 __s32 preempt_count; /* 0 => preemptable, <0 => BUG */
26 __u16 cpu;
27
28 mm_segment_t addr_limit;
29 struct restart_block restart_block;
30
31 __u8 supervisor_stack[0];
32};
33
34/*
35 * macros/functions for gaining access to the thread information structure
36 */
37#define INIT_THREAD_INFO(tsk) \
38{ \
39 .task = &tsk, \
40 .exec_domain = &default_exec_domain, \
41 .flags = 0, \
42 .cpu = 0, \
43 .preempt_count = 1, \
44 .addr_limit = KERNEL_DS, \
45 .restart_block = { \
46 .fn = do_no_restart_syscall, \
47 }, \
48}
49
50#define init_thread_info (init_thread_union.thread_info)
51#define init_stack (init_thread_union.stack)
52
53/* how to get the thread information struct from C */
54static inline struct thread_info *current_thread_info(void)
55{
56 struct thread_info *ti;
57
58 __asm__ __volatile__ ("getcon " __KCR0 ", %0\n\t" : "=r" (ti));
59
60 return ti;
61}
62
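Illustration only: generic kernel code normally reaches the owning task through the pointer stored in this structure. The macro name below is hypothetical; the port's real "current" definition lives in asm-sh64/current.h.

/* Hypothetical accessor showing the task back-pointer in use. */
#define example_current	(current_thread_info()->task)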
63/* thread information allocation */
64
65
66
67#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
68#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
69#define get_thread_info(ti) get_task_struct((ti)->task)
70#define put_thread_info(ti) put_task_struct((ti)->task)
71
72#endif /* __ASSEMBLY__ */
73
74#define THREAD_SIZE 8192
75
76#define PREEMPT_ACTIVE 0x4000000
77
78/* thread information flags */
79#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
80#define TIF_SIGPENDING 2 /* signal pending */
81#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
82#define TIF_MEMDIE 4
83
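Most ports pair the TIF_* bit numbers above with mask macros; this header does not define them here, so the following is only a sketch of that convention, using illustrative names.

/* Hypothetical mask companions to the TIF_* bit numbers. */
#define _TIF_EXAMPLE_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_EXAMPLE_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_EXAMPLE_NEED_RESCHED	(1 << TIF_NEED_RESCHED)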
84
85#endif /* __KERNEL__ */
86
87#endif /* __ASM_SH64_THREAD_INFO_H */
diff --git a/include/asm-sh64/timex.h b/include/asm-sh64/timex.h
new file mode 100644
index 000000000000..af0b79269661
--- /dev/null
+++ b/include/asm-sh64/timex.h
@@ -0,0 +1,34 @@
1#ifndef __ASM_SH64_TIMEX_H
2#define __ASM_SH64_TIMEX_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/timex.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 *
14 * sh-5 architecture timex specifications
15 *
16 */
17
18#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
19#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
20#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
21 (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
22 << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
23
24typedef unsigned long cycles_t;
25
26static __inline__ cycles_t get_cycles (void)
27{
28 return 0;
29}
30
31#define vxtime_lock() do {} while (0)
32#define vxtime_unlock() do {} while (0)
33
34#endif /* __ASM_SH64_TIMEX_H */
diff --git a/include/asm-sh64/tlb.h b/include/asm-sh64/tlb.h
new file mode 100644
index 000000000000..4979408bd88c
--- /dev/null
+++ b/include/asm-sh64/tlb.h
@@ -0,0 +1,92 @@
1/*
2 * include/asm-sh64/tlb.h
3 *
4 * Copyright (C) 2003 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 */
11#ifndef __ASM_SH64_TLB_H
12#define __ASM_SH64_TLB_H
13
14/*
15 * Note! These are mostly unused, we just need the xTLB_LAST_VAR_UNRESTRICTED
16 * for head.S! Once this limitation is gone, we can clean the rest of this up.
17 */
18
19/* ITLB defines */
20#define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */
21#define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */
22
23/* DTLB defines */
24#define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */
25#define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */
26
27#ifndef __ASSEMBLY__
28
29/**
30 * for_each_dtlb_entry
31 *
32 * @tlb: TLB entry
33 *
34 * Iterate over free (non-wired) DTLB entries
35 */
36#define for_each_dtlb_entry(tlb) \
37 for (tlb = cpu_data->dtlb.first; \
38 tlb <= cpu_data->dtlb.last; \
39 tlb += cpu_data->dtlb.step)
40
41/**
42 * for_each_itlb_entry
43 *
44 * @tlb: TLB entry
45 *
46 * Iterate over free (non-wired) ITLB entries
47 */
48#define for_each_itlb_entry(tlb) \
49 for (tlb = cpu_data->itlb.first; \
50 tlb <= cpu_data->itlb.last; \
51 tlb += cpu_data->itlb.step)
52
53/**
54 * __flush_tlb_slot
55 *
56 * @slot: Address of TLB slot.
57 *
58 * Flushes TLB slot @slot.
59 */
60static inline void __flush_tlb_slot(unsigned long long slot)
61{
62 __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
63}
64
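A usage sketch tying the iterator above to __flush_tlb_slot(): walk every non-wired DTLB slot and invalidate it. The helper is hypothetical and assumes the cpu_data TLB bookkeeping set up by sh64_tlb_init() (declared below).

/* Hypothetical helper: invalidate all non-wired DTLB entries. */
static inline void example_flush_all_dtlb(void)
{
	unsigned long long tlb;

	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);
}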
65/* arch/sh64/mm/tlb.c */
66extern int sh64_tlb_init(void);
67extern unsigned long long sh64_next_free_dtlb_entry(void);
68extern unsigned long long sh64_get_wired_dtlb_entry(void);
69extern int sh64_put_wired_dtlb_entry(unsigned long long entry);
70
71extern void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr);
72extern void sh64_teardown_tlb_slot(unsigned long long config_addr);
73
74#define tlb_start_vma(tlb, vma) \
75 flush_cache_range(vma, vma->vm_start, vma->vm_end)
76
77#define tlb_end_vma(tlb, vma) \
78 flush_tlb_range(vma, vma->vm_start, vma->vm_end)
79
80#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
81
82/*
83 * Flush whole TLBs for MM
84 */
85#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
86
87#include <asm-generic/tlb.h>
88
89#endif /* __ASSEMBLY__ */
90
91#endif /* __ASM_SH64_TLB_H */
92
diff --git a/include/asm-sh64/tlbflush.h b/include/asm-sh64/tlbflush.h
new file mode 100644
index 000000000000..15c0719eecc3
--- /dev/null
+++ b/include/asm-sh64/tlbflush.h
@@ -0,0 +1,31 @@
1#ifndef __ASM_SH64_TLBFLUSH_H
2#define __ASM_SH64_TLBFLUSH_H
3
4#include <asm/pgalloc.h>
5
6/*
7 * TLB flushing:
8 *
9 * - flush_tlb() flushes the current mm struct TLBs
10 * - flush_tlb_all() flushes all processes TLBs
 11 * - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 12 * - flush_tlb_page(vma, vmaddr) flushes one page
 13 * - flush_tlb_range(vma, start, end) flushes a range of pages
14 *
15 */
16
17extern void flush_tlb(void);
18extern void flush_tlb_all(void);
19extern void flush_tlb_mm(struct mm_struct *mm);
20extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
21 unsigned long end);
22extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
23extern inline void flush_tlb_pgtables(struct mm_struct *mm,
24 unsigned long start, unsigned long end)
25{
26}
27
28extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
29
30#endif /* __ASM_SH64_TLBFLUSH_H */
31
diff --git a/include/asm-sh64/topology.h b/include/asm-sh64/topology.h
new file mode 100644
index 000000000000..34211787345f
--- /dev/null
+++ b/include/asm-sh64/topology.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_SH64_TOPOLOGY_H
2#define __ASM_SH64_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif /* __ASM_SH64_TOPOLOGY_H */
diff --git a/include/asm-sh64/types.h b/include/asm-sh64/types.h
new file mode 100644
index 000000000000..41d4d2f82aa9
--- /dev/null
+++ b/include/asm-sh64/types.h
@@ -0,0 +1,76 @@
1#ifndef __ASM_SH64_TYPES_H
2#define __ASM_SH64_TYPES_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/types.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#ifndef __ASSEMBLY__
16
17typedef unsigned short umode_t;
18
19/*
20 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
21 * header files exported to user space
22 */
23
24typedef __signed__ char __s8;
25typedef unsigned char __u8;
26
27typedef __signed__ short __s16;
28typedef unsigned short __u16;
29
30typedef __signed__ int __s32;
31typedef unsigned int __u32;
32
33#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
34typedef __signed__ long long __s64;
35typedef unsigned long long __u64;
36#endif
37
38#endif /* __ASSEMBLY__ */
39
40/*
41 * These aren't exported outside the kernel to avoid name space clashes
42 */
43#ifdef __KERNEL__
44
45#ifndef __ASSEMBLY__
46
47typedef __signed__ char s8;
48typedef unsigned char u8;
49
50typedef __signed__ short s16;
51typedef unsigned short u16;
52
53typedef __signed__ int s32;
54typedef unsigned int u32;
55
56typedef __signed__ long long s64;
57typedef unsigned long long u64;
58
59/* DMA addresses come in generic and 64-bit flavours. */
60
61#ifdef CONFIG_HIGHMEM64G
62typedef u64 dma_addr_t;
63#else
64typedef u32 dma_addr_t;
65#endif
66typedef u64 dma64_addr_t;
67
68typedef unsigned int kmem_bufctl_t;
69
70#endif /* __ASSEMBLY__ */
71
72#define BITS_PER_LONG 32
73
74#endif /* __KERNEL__ */
75
76#endif /* __ASM_SH64_TYPES_H */
diff --git a/include/asm-sh64/uaccess.h b/include/asm-sh64/uaccess.h
new file mode 100644
index 000000000000..a33654d576a1
--- /dev/null
+++ b/include/asm-sh64/uaccess.h
@@ -0,0 +1,327 @@
1#ifndef __ASM_SH64_UACCESS_H
2#define __ASM_SH64_UACCESS_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/uaccess.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003, 2004 Paul Mundt
13 *
14 * User space memory access functions
15 *
16 * Copyright (C) 1999 Niibe Yutaka
17 *
18 * Based on:
19 * MIPS implementation version 1.15 by
20 * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
21 * and i386 version.
22 *
23 */
24
25#include <linux/errno.h>
26#include <linux/sched.h>
27
28#define VERIFY_READ 0
29#define VERIFY_WRITE 1
30
31/*
32 * The fs value determines whether argument validity checking should be
33 * performed or not. If get_fs() == USER_DS, checking is performed, with
34 * get_fs() == KERNEL_DS, checking is bypassed.
35 *
36 * For historical reasons (Data Segment Register?), these macros are misnamed.
37 */
38
39#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
40
41#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
42#define USER_DS MAKE_MM_SEG(0x80000000)
43
44#define get_ds() (KERNEL_DS)
45#define get_fs() (current_thread_info()->addr_limit)
46#define set_fs(x) (current_thread_info()->addr_limit=(x))
47
48#define segment_eq(a,b) ((a).seg == (b).seg)
49
50#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
51
52/*
53 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
54 *
55 * sum := addr + size; carry? --> flag = true;
56 * if (sum >= addr_limit) flag = true;
57 */
58#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)
59
60#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
61#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
62
63/* this function will go away soon - use access_ok() instead */
64extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
65{
66 return access_ok(type,addr,size) ? 0 : -EFAULT;
67}
68
69/*
70 * Uh, these should become the main single-value transfer routines ...
71 * They automatically use the right size if we just have the right
72 * pointer type ...
73 *
74 * As MIPS uses the same address space for kernel and user data, we
75 * can just do these as direct assignments.
76 *
77 * Careful to not
78 * (a) re-use the arguments for side effects (sizeof is ok)
79 * (b) require any knowledge of processes at this stage
80 */
81#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
82#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
83
84/*
85 * The "__xxx" versions do not do address space checking, useful when
86 * doing multiple accesses to the same area (the user has to do the
87 * checks by hand with "access_ok()")
88 */
89#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
90#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
91
92/*
 93 * The "xxx_ret" versions return the constant given as the third argument
 94 * if something bad happens. These macros can be optimized for the common
 95 * case where the function using xxx_ret simply returns on failure.
96 */
97
98#define put_user_ret(x,ptr,ret) ({ \
99if (put_user(x,ptr)) return ret; })
100
101#define get_user_ret(x,ptr,ret) ({ \
102if (get_user(x,ptr)) return ret; })
103
104#define __put_user_ret(x,ptr,ret) ({ \
105if (__put_user(x,ptr)) return ret; })
106
107#define __get_user_ret(x,ptr,ret) ({ \
108if (__get_user(x,ptr)) return ret; })
109
110struct __large_struct { unsigned long buf[100]; };
111#define __m(x) (*(struct __large_struct *)(x))
112
113#define __get_user_size(x,ptr,size,retval) \
114do { \
115 retval = 0; \
116 switch (size) { \
117 case 1: \
118 retval = __get_user_asm_b(x, ptr); \
119 break; \
120 case 2: \
121 retval = __get_user_asm_w(x, ptr); \
122 break; \
123 case 4: \
124 retval = __get_user_asm_l(x, ptr); \
125 break; \
126 case 8: \
127 retval = __get_user_asm_q(x, ptr); \
128 break; \
129 default: \
130 __get_user_unknown(); \
131 break; \
132 } \
133} while (0)
134
135#define __get_user_nocheck(x,ptr,size) \
136({ \
137 long __gu_addr = (long)(ptr); \
138 long __gu_err; \
139 __typeof(*(ptr)) __gu_val; \
140 __asm__ ("":"=r" (__gu_val)); \
141 __asm__ ("":"=r" (__gu_err)); \
142 __get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
143 (x) = (__typeof__(*(ptr))) __gu_val; \
144 __gu_err; \
145})
146
147#define __get_user_check(x,ptr,size) \
148({ \
149 long __gu_addr = (long)(ptr); \
150 long __gu_err = -EFAULT; \
151 __typeof(*(ptr)) __gu_val; \
152 __asm__ ("":"=r" (__gu_val)); \
153 __asm__ ("":"=r" (__gu_err)); \
154 if (__access_ok(__gu_addr, (size))) \
155 __get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
156 (x) = (__typeof__(*(ptr))) __gu_val; \
157 __gu_err; \
158})
159
160extern long __get_user_asm_b(void *, long);
161extern long __get_user_asm_w(void *, long);
162extern long __get_user_asm_l(void *, long);
163extern long __get_user_asm_q(void *, long);
164extern void __get_user_unknown(void);
165
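A sketch of the idiom the comments above describe: check the whole user range once with access_ok(), then use the unchecked __get_user() for the individual accesses. The helper below is hypothetical.

/* Hypothetical helper: fetch two adjacent ints from user space. */
static inline int example_get_pair(int *a, int *b, int __user *uptr)
{
	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
		return -EFAULT;
	if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
		return -EFAULT;	/* faulted despite the range check */
	return 0;
}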
166#define __put_user_size(x,ptr,size,retval) \
167do { \
168 retval = 0; \
169 switch (size) { \
170 case 1: \
171 retval = __put_user_asm_b(x, ptr); \
172 break; \
173 case 2: \
174 retval = __put_user_asm_w(x, ptr); \
175 break; \
176 case 4: \
177 retval = __put_user_asm_l(x, ptr); \
178 break; \
179 case 8: \
180 retval = __put_user_asm_q(x, ptr); \
181 break; \
182 default: \
183 __put_user_unknown(); \
184 } \
185} while (0)
186
187#define __put_user_nocheck(x,ptr,size) \
188({ \
189 long __pu_err; \
190 __typeof__(*(ptr)) __pu_val = (x); \
191 __put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
192 __pu_err; \
193})
194
195#define __put_user_check(x,ptr,size) \
196({ \
197 long __pu_err = -EFAULT; \
198 long __pu_addr = (long)(ptr); \
199 __typeof__(*(ptr)) __pu_val = (x); \
200 \
201 if (__access_ok(__pu_addr, (size))) \
202 __put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err);\
203 __pu_err; \
204})
205
206extern long __put_user_asm_b(void *, long);
207extern long __put_user_asm_w(void *, long);
208extern long __put_user_asm_l(void *, long);
209extern long __put_user_asm_q(void *, long);
210extern void __put_user_unknown(void);
211
212
213/* Generic arbitrary-sized copy. */
214/* Returns the number of bytes NOT copied. */
215/* XXX: should copy in 4-byte chunks and then handle the remainder. */
216extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);
217
218#define copy_to_user(to,from,n) ({ \
219void *__copy_to = (void *) (to); \
220__kernel_size_t __copy_size = (__kernel_size_t) (n); \
221__kernel_size_t __copy_res; \
222if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
223__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
224} else __copy_res = __copy_size; \
225__copy_res; })
226
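A sketch of how callers usually consume the "bytes NOT copied" convention noted above, turning a short copy into -EFAULT; the helper is hypothetical.

/* Hypothetical helper: copy a kernel buffer out to user space. */
static inline int example_copy_out(void __user *dst, const void *src,
				   unsigned long n)
{
	if (copy_to_user(dst, src, n))
		return -EFAULT;	/* some bytes were left uncopied */
	return 0;
}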
227#define copy_to_user_ret(to,from,n,retval) ({ \
228if (copy_to_user(to,from,n)) \
229 return retval; \
230})
231
232#define __copy_to_user(to,from,n) \
233 __copy_user((void *)(to), \
234 (void *)(from), n)
235
236#define __copy_to_user_ret(to,from,n,retval) ({ \
237if (__copy_to_user(to,from,n)) \
238 return retval; \
239})
240
241#define copy_from_user(to,from,n) ({ \
242void *__copy_to = (void *) (to); \
243void *__copy_from = (void *) (from); \
244__kernel_size_t __copy_size = (__kernel_size_t) (n); \
245__kernel_size_t __copy_res; \
246if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
247__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
248} else __copy_res = __copy_size; \
249__copy_res; })
250
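A sketch of the get_fs()/set_fs() dance described near the top of this header: temporarily widen addr_limit so the user-copy path will accept kernel addresses, then restore it. The helper is hypothetical and not an API of this file.

/* Hypothetical helper: use the user-copy machinery on a kernel-space source. */
static inline unsigned long example_read_as_kernel(void *dst,
						   const void __user *src,
						   unsigned long n)
{
	mm_segment_t old_fs = get_fs();
	unsigned long uncopied;

	set_fs(KERNEL_DS);		/* kernel addresses now pass __access_ok() */
	uncopied = copy_from_user(dst, src, n);
	set_fs(old_fs);			/* always restore the caller's limit */
	return uncopied;		/* 0 means the whole copy succeeded */
}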
251#define copy_from_user_ret(to,from,n,retval) ({ \
252if (copy_from_user(to,from,n)) \
253 return retval; \
254})
255
256#define __copy_from_user(to,from,n) \
257 __copy_user((void *)(to), \
258 (void *)(from), n)
259
260#define __copy_from_user_ret(to,from,n,retval) ({ \
261if (__copy_from_user(to,from,n)) \
262 return retval; \
263})
264
265#define __copy_to_user_inatomic __copy_to_user
266#define __copy_from_user_inatomic __copy_from_user
267
268/* XXX: Not sure this works well;
269   it should clear in 4-byte chunks and then handle the remainder. */
270extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
271
272#define clear_user(addr,n) ({ \
273void * __cl_addr = (addr); \
274unsigned long __cl_size = (n); \
275if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
276__cl_size = __clear_user(__cl_addr, __cl_size); \
277__cl_size; })
278
279extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);
280
281#define strncpy_from_user(dest,src,count) ({ \
282unsigned long __sfu_src = (unsigned long) (src); \
283int __sfu_count = (int) (count); \
284long __sfu_res = -EFAULT; \
285if(__access_ok(__sfu_src, __sfu_count)) { \
286__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
287} __sfu_res; })
288
289#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
290
291/*
292 * Return the size of a string (including the ending 0!)
293 */
294extern long __strnlen_user(const char *__s, long __n);
295
296extern __inline__ long strnlen_user(const char *s, long n)
297{
298 if (!__addr_ok(s))
299 return 0;
300 else
301 return __strnlen_user(s, n);
302}
303
304struct exception_table_entry
305{
306 unsigned long insn, fixup;
307};
308
309#define ARCH_HAS_SEARCH_EXTABLE
310
311/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
312 kmalloc allocations to be 8-byte aligned. Without this, the alignment
313 becomes BYTES_PER_WORD, i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
314 sh64 at the moment). */
315#define ARCH_KMALLOC_MINALIGN 8
316
317/*
318 * We want 8-byte alignment for the slab caches as well, otherwise we have
319 * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
320 */
321#define ARCH_SLAB_MINALIGN 8
322
323/* Returns 0 if exception not found and fixup.unit otherwise. */
324extern unsigned long search_exception_table(unsigned long addr);
325extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
326
327#endif /* __ASM_SH64_UACCESS_H */
diff --git a/include/asm-sh64/ucontext.h b/include/asm-sh64/ucontext.h
new file mode 100644
index 000000000000..cf77a08551ca
--- /dev/null
+++ b/include/asm-sh64/ucontext.h
@@ -0,0 +1,23 @@
1#ifndef __ASM_SH64_UCONTEXT_H
2#define __ASM_SH64_UCONTEXT_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/ucontext.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15struct ucontext {
16 unsigned long uc_flags;
17 struct ucontext *uc_link;
18 stack_t uc_stack;
19 struct sigcontext uc_mcontext;
20 sigset_t uc_sigmask; /* mask last for extensibility */
21};
22
23#endif /* __ASM_SH64_UCONTEXT_H */
diff --git a/include/asm-sh64/unaligned.h b/include/asm-sh64/unaligned.h
new file mode 100644
index 000000000000..74481b186ae8
--- /dev/null
+++ b/include/asm-sh64/unaligned.h
@@ -0,0 +1,17 @@
1#ifndef __ASM_SH64_UNALIGNED_H
2#define __ASM_SH64_UNALIGNED_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/unaligned.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#include <asm-generic/unaligned.h>
16
17#endif /* __ASM_SH64_UNALIGNED_H */
diff --git a/include/asm-sh64/unistd.h b/include/asm-sh64/unistd.h
new file mode 100644
index 000000000000..95f0b130405c
--- /dev/null
+++ b/include/asm-sh64/unistd.h
@@ -0,0 +1,560 @@
1#ifndef __ASM_SH64_UNISTD_H
2#define __ASM_SH64_UNISTD_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/unistd.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 * Copyright (C) 2003 Paul Mundt
13 * Copyright (C) 2004 Sean McGoogan
14 *
15 * This file contains the system call numbers.
16 *
17 */
18
19#define __NR_setup 0 /* used only by init, to get system going */
20#define __NR_exit 1
21#define __NR_fork 2
22#define __NR_read 3
23#define __NR_write 4
24#define __NR_open 5
25#define __NR_close 6
26#define __NR_waitpid 7
27#define __NR_creat 8
28#define __NR_link 9
29#define __NR_unlink 10
30#define __NR_execve 11
31#define __NR_chdir 12
32#define __NR_time 13
33#define __NR_mknod 14
34#define __NR_chmod 15
35#define __NR_lchown 16
36#define __NR_break 17
37#define __NR_oldstat 18
38#define __NR_lseek 19
39#define __NR_getpid 20
40#define __NR_mount 21
41#define __NR_umount 22
42#define __NR_setuid 23
43#define __NR_getuid 24
44#define __NR_stime 25
45#define __NR_ptrace 26
46#define __NR_alarm 27
47#define __NR_oldfstat 28
48#define __NR_pause 29
49#define __NR_utime 30
50#define __NR_stty 31
51#define __NR_gtty 32
52#define __NR_access 33
53#define __NR_nice 34
54#define __NR_ftime 35
55#define __NR_sync 36
56#define __NR_kill 37
57#define __NR_rename 38
58#define __NR_mkdir 39
59#define __NR_rmdir 40
60#define __NR_dup 41
61#define __NR_pipe 42
62#define __NR_times 43
63#define __NR_prof 44
64#define __NR_brk 45
65#define __NR_setgid 46
66#define __NR_getgid 47
67#define __NR_signal 48
68#define __NR_geteuid 49
69#define __NR_getegid 50
70#define __NR_acct 51
71#define __NR_umount2 52
72#define __NR_lock 53
73#define __NR_ioctl 54
74#define __NR_fcntl 55
75#define __NR_mpx 56
76#define __NR_setpgid 57
77#define __NR_ulimit 58
78#define __NR_oldolduname 59
79#define __NR_umask 60
80#define __NR_chroot 61
81#define __NR_ustat 62
82#define __NR_dup2 63
83#define __NR_getppid 64
84#define __NR_getpgrp 65
85#define __NR_setsid 66
86#define __NR_sigaction 67
87#define __NR_sgetmask 68
88#define __NR_ssetmask 69
89#define __NR_setreuid 70
90#define __NR_setregid 71
91#define __NR_sigsuspend 72
92#define __NR_sigpending 73
93#define __NR_sethostname 74
94#define __NR_setrlimit 75
95#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
96#define __NR_getrusage 77
97#define __NR_gettimeofday 78
98#define __NR_settimeofday 79
99#define __NR_getgroups 80
100#define __NR_setgroups 81
101#define __NR_select 82
102#define __NR_symlink 83
103#define __NR_oldlstat 84
104#define __NR_readlink 85
105#define __NR_uselib 86
106#define __NR_swapon 87
107#define __NR_reboot 88
108#define __NR_readdir 89
109#define __NR_mmap 90
110#define __NR_munmap 91
111#define __NR_truncate 92
112#define __NR_ftruncate 93
113#define __NR_fchmod 94
114#define __NR_fchown 95
115#define __NR_getpriority 96
116#define __NR_setpriority 97
117#define __NR_profil 98
118#define __NR_statfs 99
119#define __NR_fstatfs 100
120#define __NR_ioperm 101
121#define __NR_socketcall 102 /* old implementation of socket systemcall */
122#define __NR_syslog 103
123#define __NR_setitimer 104
124#define __NR_getitimer 105
125#define __NR_stat 106
126#define __NR_lstat 107
127#define __NR_fstat 108
128#define __NR_olduname 109
129#define __NR_iopl 110
130#define __NR_vhangup 111
131#define __NR_idle 112
132#define __NR_vm86old 113
133#define __NR_wait4 114
134#define __NR_swapoff 115
135#define __NR_sysinfo 116
136#define __NR_ipc 117
137#define __NR_fsync 118
138#define __NR_sigreturn 119
139#define __NR_clone 120
140#define __NR_setdomainname 121
141#define __NR_uname 122
142#define __NR_modify_ldt 123
143#define __NR_adjtimex 124
144#define __NR_mprotect 125
145#define __NR_sigprocmask 126
146#define __NR_create_module 127
147#define __NR_init_module 128
148#define __NR_delete_module 129
149#define __NR_get_kernel_syms 130
150#define __NR_quotactl 131
151#define __NR_getpgid 132
152#define __NR_fchdir 133
153#define __NR_bdflush 134
154#define __NR_sysfs 135
155#define __NR_personality 136
156#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
157#define __NR_setfsuid 138
158#define __NR_setfsgid 139
159#define __NR__llseek 140
160#define __NR_getdents 141
161#define __NR__newselect 142
162#define __NR_flock 143
163#define __NR_msync 144
164#define __NR_readv 145
165#define __NR_writev 146
166#define __NR_getsid 147
167#define __NR_fdatasync 148
168#define __NR__sysctl 149
169#define __NR_mlock 150
170#define __NR_munlock 151
171#define __NR_mlockall 152
172#define __NR_munlockall 153
173#define __NR_sched_setparam 154
174#define __NR_sched_getparam 155
175#define __NR_sched_setscheduler 156
176#define __NR_sched_getscheduler 157
177#define __NR_sched_yield 158
178#define __NR_sched_get_priority_max 159
179#define __NR_sched_get_priority_min 160
180#define __NR_sched_rr_get_interval 161
181#define __NR_nanosleep 162
182#define __NR_mremap 163
183#define __NR_setresuid 164
184#define __NR_getresuid 165
185#define __NR_vm86 166
186#define __NR_query_module 167
187#define __NR_poll 168
188#define __NR_nfsservctl 169
189#define __NR_setresgid 170
190#define __NR_getresgid 171
191#define __NR_prctl 172
192#define __NR_rt_sigreturn 173
193#define __NR_rt_sigaction 174
194#define __NR_rt_sigprocmask 175
195#define __NR_rt_sigpending 176
196#define __NR_rt_sigtimedwait 177
197#define __NR_rt_sigqueueinfo 178
198#define __NR_rt_sigsuspend 179
199#define __NR_pread 180
200#define __NR_pwrite 181
201#define __NR_chown 182
202#define __NR_getcwd 183
203#define __NR_capget 184
204#define __NR_capset 185
205#define __NR_sigaltstack 186
206#define __NR_sendfile 187
207#define __NR_streams1 188 /* some people actually want it */
208#define __NR_streams2 189 /* some people actually want it */
209#define __NR_vfork 190
210#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
211#define __NR_mmap2 192
212#define __NR_truncate64 193
213#define __NR_ftruncate64 194
214#define __NR_stat64 195
215#define __NR_lstat64 196
216#define __NR_fstat64 197
217#define __NR_lchown32 198
218#define __NR_getuid32 199
219#define __NR_getgid32 200
220#define __NR_geteuid32 201
221#define __NR_getegid32 202
222#define __NR_setreuid32 203
223#define __NR_setregid32 204
224#define __NR_getgroups32 205
225#define __NR_setgroups32 206
226#define __NR_fchown32 207
227#define __NR_setresuid32 208
228#define __NR_getresuid32 209
229#define __NR_setresgid32 210
230#define __NR_getresgid32 211
231#define __NR_chown32 212
232#define __NR_setuid32 213
233#define __NR_setgid32 214
234#define __NR_setfsuid32 215
235#define __NR_setfsgid32 216
236#define __NR_pivot_root 217
237#define __NR_mincore 218
238#define __NR_madvise 219
239
240/* Non-multiplexed socket family */
241#define __NR_socket 220
242#define __NR_bind 221
243#define __NR_connect 222
244#define __NR_listen 223
245#define __NR_accept 224
246#define __NR_getsockname 225
247#define __NR_getpeername 226
248#define __NR_socketpair 227
249#define __NR_send 228
250#define __NR_sendto 229
251#define __NR_recv 230
252#define __NR_recvfrom 231
253#define __NR_shutdown 232
254#define __NR_setsockopt 233
255#define __NR_getsockopt 234
256#define __NR_sendmsg 235
257#define __NR_recvmsg 236
258
259/* Non-multiplexed IPC family */
260#define __NR_semop 237
261#define __NR_semget 238
262#define __NR_semctl 239
263#define __NR_msgsnd 240
264#define __NR_msgrcv 241
265#define __NR_msgget 242
266#define __NR_msgctl 243
267#if 0
268#define __NR_shmatcall 244
269#endif
270#define __NR_shmdt 245
271#define __NR_shmget 246
272#define __NR_shmctl 247
273
274#define __NR_getdents64 248
275#define __NR_fcntl64 249
276/* 250 and 251 are unused */
277#define __NR_gettid 252
278#define __NR_readahead 253
279#define __NR_setxattr 254
280#define __NR_lsetxattr 255
281#define __NR_fsetxattr 256
282#define __NR_getxattr 257
283#define __NR_lgetxattr 258
284#define __NR_fgetxattr 259
285#define __NR_listxattr 260
286#define __NR_llistxattr 261
287#define __NR_flistxattr 262
288#define __NR_removexattr 263
289#define __NR_lremovexattr 264
290#define __NR_fremovexattr 265
291#define __NR_tkill 266
292#define __NR_sendfile64 267
293#define __NR_futex 268
294#define __NR_sched_setaffinity 269
295#define __NR_sched_getaffinity 270
296#define __NR_set_thread_area 271
297#define __NR_get_thread_area 272
298#define __NR_io_setup 273
299#define __NR_io_destroy 274
300#define __NR_io_getevents 275
301#define __NR_io_submit 276
302#define __NR_io_cancel 277
303#define __NR_fadvise64 278
304#define __NR_exit_group 280
305
306#define __NR_lookup_dcookie 281
307#define __NR_epoll_create 282
308#define __NR_epoll_ctl 283
309#define __NR_epoll_wait 284
310#define __NR_remap_file_pages 285
311#define __NR_set_tid_address 286
312#define __NR_timer_create 287
313#define __NR_timer_settime (__NR_timer_create+1)
314#define __NR_timer_gettime (__NR_timer_create+2)
315#define __NR_timer_getoverrun (__NR_timer_create+3)
316#define __NR_timer_delete (__NR_timer_create+4)
317#define __NR_clock_settime (__NR_timer_create+5)
318#define __NR_clock_gettime (__NR_timer_create+6)
319#define __NR_clock_getres (__NR_timer_create+7)
320#define __NR_clock_nanosleep (__NR_timer_create+8)
321#define __NR_statfs64 296
322#define __NR_fstatfs64 297
323#define __NR_tgkill 298
324#define __NR_utimes 299
325#define __NR_fadvise64_64 300
326#define __NR_vserver 301
327#define __NR_mbind 302
328#define __NR_get_mempolicy 303
329#define __NR_set_mempolicy 304
330#define __NR_mq_open 305
331#define __NR_mq_unlink (__NR_mq_open+1)
332#define __NR_mq_timedsend (__NR_mq_open+2)
333#define __NR_mq_timedreceive (__NR_mq_open+3)
334#define __NR_mq_notify (__NR_mq_open+4)
335#define __NR_mq_getsetattr (__NR_mq_open+5)
336#define __NR_sys_kexec_load 311
337#define __NR_waitid 312
338#define __NR_add_key 313
339#define __NR_request_key 314
340#define __NR_keyctl 315
341
342#define NR_syscalls 316
343
344/* user-visible error numbers are in the range -1 to -125: see <asm-sh64/errno.h> */
345
346#define __syscall_return(type, res) \
347do { \
348 /* Note: when returning from kernel the return value is in r9 \
349 ** This prevents conflicts between return value and arg1 \
350 ** when dispatching signal handler, in other words makes \
351 ** life easier in the system call epilogue (see entry.S) \
352 */ \
353 register unsigned long __sr2 __asm__ ("r2") = res; \
354 if ((unsigned long)(res) >= (unsigned long)(-125)) { \
355 errno = -(res); \
356 __sr2 = -1; \
357 } \
358 return (type) (__sr2); \
359} while (0)
360
361/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
362
363#define _syscall0(type,name) \
364type name(void) \
365{ \
366register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \
367__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "()" \
368 : "=r" (__sc0) \
369 : "r" (__sc0) ); \
370__syscall_return(type,__sc0); \
371}
372
373 /*
374 * The apparent spurious "dummy" assembler comment is *needed*,
375 * as without it, the compiler treats the arg<n> variables
376 * as no longer live just before the asm. The compiler can
377 * then optimize the storage into any registers it wishes.
378 * The additional dummy statement forces the compiler to put
379 * the arguments into the correct registers before the TRAPA.
380 */
381#define _syscall1(type,name,type1,arg1) \
382type name(type1 arg1) \
383{ \
384register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \
385register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
386__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2)" \
387 : "=r" (__sc0) \
388 : "r" (__sc0), "r" (__sc2)); \
389__asm__ __volatile__ ("!dummy %0 %1" \
390 : \
391 : "r" (__sc0), "r" (__sc2)); \
392__syscall_return(type,__sc0); \
393}
394
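To make the expansion concrete, _syscall1(int, close, int, fd) produces approximately the code below (reformatted and commented; shown under #if 0 as an illustration only).

#if 0	/* expansion sketch, illustration only */
int close(int fd)
{
	register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_close);
	register unsigned long __sc2 __asm__ ("r2") = (unsigned long) fd;

	/* trap into the kernel: call info in r9, first argument in r2 */
	__asm__ __volatile__ ("trapa %1 !\t\t\tclose(%2)"
		: "=r" (__sc0)
		: "r" (__sc0), "r" (__sc2));
	/* the "dummy" asm keeps the argument registers live across the trap */
	__asm__ __volatile__ ("!dummy %0 %1"
		:
		: "r" (__sc0), "r" (__sc2));
	__syscall_return(int, __sc0);
}
#endif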
395#define _syscall2(type,name,type1,arg1,type2,arg2) \
396type name(type1 arg1,type2 arg2) \
397{ \
398register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \
399register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
400register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
401__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3)" \
402 : "=r" (__sc0) \
403 : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \
404__asm__ __volatile__ ("!dummy %0 %1 %2" \
405 : \
406 : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \
407__syscall_return(type,__sc0); \
408}
409
410#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
411type name(type1 arg1,type2 arg2,type3 arg3) \
412{ \
413register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \
414register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
415register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
416register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
417__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4)" \
418 : "=r" (__sc0) \
419 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \
420__asm__ __volatile__ ("!dummy %0 %1 %2 %3" \
421 : \
422 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \
423__syscall_return(type,__sc0); \
424}
425
426#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
427type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
428{ \
429register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \
430register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
431register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
432register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
433register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
434__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5)" \
435 : "=r" (__sc0) \
436 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
437__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4" \
438 : \
439 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
440__syscall_return(type,__sc0); \
441}
442
443#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
444type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
445{ \
446register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \
447register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
448register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
449register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
450register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
451register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \
452__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6)" \
453 : "=r" (__sc0) \
454 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
455 "r" (__sc6)); \
456__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5" \
457 : \
458 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
459 "r" (__sc6)); \
460__syscall_return(type,__sc0); \
461}
462
463#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
464type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
465{ \
466register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \
467register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
468register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
469register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
470register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
471register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \
472register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6; \
473__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)" \
474 : "=r" (__sc0) \
475 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
476 "r" (__sc6), "r" (__sc7)); \
477__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5 %6" \
478 : \
479 : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
480 "r" (__sc6), "r" (__sc7)); \
481__syscall_return(type,__sc0); \
482}
483
484#ifdef __KERNEL__
485#define __ARCH_WANT_IPC_PARSE_VERSION
486#define __ARCH_WANT_OLD_READDIR
487#define __ARCH_WANT_OLD_STAT
488#define __ARCH_WANT_STAT64
489#define __ARCH_WANT_SYS_ALARM
490#define __ARCH_WANT_SYS_GETHOSTNAME
491#define __ARCH_WANT_SYS_PAUSE
492#define __ARCH_WANT_SYS_SGETMASK
493#define __ARCH_WANT_SYS_SIGNAL
494#define __ARCH_WANT_SYS_TIME
495#define __ARCH_WANT_SYS_UTIME
496#define __ARCH_WANT_SYS_WAITPID
497#define __ARCH_WANT_SYS_SOCKETCALL
498#define __ARCH_WANT_SYS_FADVISE64
499#define __ARCH_WANT_SYS_GETPGRP
500#define __ARCH_WANT_SYS_LLSEEK
501#define __ARCH_WANT_SYS_NICE
502#define __ARCH_WANT_SYS_OLD_GETRLIMIT
503#define __ARCH_WANT_SYS_OLDUMOUNT
504#define __ARCH_WANT_SYS_SIGPENDING
505#define __ARCH_WANT_SYS_SIGPROCMASK
506#define __ARCH_WANT_SYS_RT_SIGACTION
507#endif
508
509#ifdef __KERNEL_SYSCALLS__
510
511/* Copy from sh */
512#include <linux/compiler.h>
513#include <linux/types.h>
514#include <asm/ptrace.h>
515
516/*
517 * we need this inline - forking from kernel space will result
518 * in NO COPY ON WRITE (!!!), until an execve is executed. This
519 * is no problem, but for the stack. This is handled by not letting
520 * main() use the stack at all after fork(). Thus, no function
521 * calls - which means inline code for fork too, as otherwise we
522 * would use the stack upon exit from 'fork()'.
523 *
524 * Actually only pause and fork are needed inline, so that there
525 * won't be any messing with the stack from main(), but we define
526 * some others too.
527 */
528#define __NR__exit __NR_exit
529static inline _syscall0(int,pause)
530static inline _syscall1(int,setup,int,magic)
531static inline _syscall0(int,sync)
532static inline _syscall0(pid_t,setsid)
533static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
534static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
535static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
536static inline _syscall1(int,dup,int,fd)
537static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
538static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
539static inline _syscall1(int,close,int,fd)
540static inline _syscall1(int,_exit,int,exitcode)
541static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
542static inline _syscall1(int,delete_module,const char *,name)
543
544static inline pid_t wait(int * wait_stat)
545{
546 return waitpid(-1,wait_stat,0);
547}
548#endif
549
550/*
551 * "Conditional" syscalls
552 *
553 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
554 * but it doesn't work on all toolchains, so we just do it by hand
555 */
556#ifndef cond_syscall
557#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
558#endif
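A usage sketch of cond_syscall(): naming an optional entry point this way lets configurations that omit it still link, with calls falling through to sys_ni_syscall. The symbol below is hypothetical.

/* Hypothetical optional syscall stub. */
cond_syscall(example_optional_syscall);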
559
560#endif /* __ASM_SH64_UNISTD_H */
diff --git a/include/asm-sh64/user.h b/include/asm-sh64/user.h
new file mode 100644
index 000000000000..8f32f39a8ca9
--- /dev/null
+++ b/include/asm-sh64/user.h
@@ -0,0 +1,71 @@
1#ifndef __ASM_SH64_USER_H
2#define __ASM_SH64_USER_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * include/asm-sh64/user.h
10 *
11 * Copyright (C) 2000, 2001 Paolo Alberelli
12 *
13 */
14
15#include <linux/types.h>
16#include <asm/processor.h>
17#include <asm/ptrace.h>
18#include <asm/page.h>
19
20/*
21 * Core file format: The core file is written in such a way that gdb
22 * can understand it and provide useful information to the user (under
23 * linux we use the `trad-core' bfd). The file contents are as follows:
24 *
25 * upage: 1 page consisting of a user struct that tells gdb
26 * what is present in the file. Directly after this is a
27 * copy of the task_struct, which is currently not used by gdb,
28 * but it may come in handy at some point. All of the registers
29 * are stored as part of the upage. The upage should always be
30 * only one page long.
31 * data: The data segment follows next. We use current->end_text to
32 * current->brk to pick up all of the user variables, plus any memory
33 * that may have been sbrk'ed. No attempt is made to determine if a
34 * page is demand-zero or if a page is totally unused, we just cover
35 * the entire range. All of the addresses are rounded in such a way
36 * that an integral number of pages is written.
37 * stack: We need the stack information in order to get a meaningful
38 * backtrace. We need to write the data from usp to
39 * current->start_stack, so we round each of these in order to be able
40 * to write an integer number of pages.
41 */
42
43struct user_fpu_struct {
44 unsigned long long fp_regs[32];
45 unsigned int fpscr;
46};
47
48struct user {
49 struct pt_regs regs; /* entire machine state */
50 struct user_fpu_struct fpu; /* Math Co-processor registers */
51 int u_fpvalid; /* True if math co-processor being used */
52 size_t u_tsize; /* text size (pages) */
53 size_t u_dsize; /* data size (pages) */
54 size_t u_ssize; /* stack size (pages) */
55 unsigned long start_code; /* text starting address */
56 unsigned long start_data; /* data starting address */
57 unsigned long start_stack; /* stack starting address */
58 long int signal; /* signal causing core dump */
59 struct regs * u_ar0; /* help gdb find registers */
60 struct user_fpu_struct* u_fpstate; /* Math Co-processor pointer */
61 unsigned long magic; /* identifies a core file */
62 char u_comm[32]; /* user command name */
63};
64
65#define NBPG PAGE_SIZE
66#define UPAGES 1
67#define HOST_TEXT_START_ADDR (u.start_code)
68#define HOST_DATA_START_ADDR (u.start_data)
69#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
70
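A sketch of how a core-file consumer converts the page-granular sizes recorded above back into byte counts; the helper is illustrative, not part of the dump format.

/* Hypothetical helper for a core-file reader. */
static inline unsigned long example_user_data_bytes(const struct user *u)
{
	return (unsigned long) u->u_dsize * NBPG;	/* NBPG == PAGE_SIZE */
}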
71#endif /* __ASM_SH64_USER_H */