author	David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 04:50:33 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 04:50:33 -0500
commit	353816f43d1fb340ff2d9a911dd5d0799c09f6a5 (patch)
tree	517290fd884d286fe2971137ac89f89e3567785a /arch/sh/include
parent	160bbab3000dafccbe43688e48208cecf4deb879 (diff)
parent	fe0bdec68b77020281dc814805edfe594ae89e0f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	arch/arm/mach-pxa/corgi.c
	arch/arm/mach-pxa/poodle.c
	arch/arm/mach-pxa/spitz.c
Diffstat (limited to 'arch/sh/include')
34 files changed, 797 insertions, 226 deletions
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 2702d81bfc0..36736c7e93d 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h | |||
@@ -49,5 +49,16 @@ | |||
49 | /* Check if an address can be reached in 29 bits */ | 49 | /* Check if an address can be reached in 29 bits */ |
50 | #define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000) | 50 | #define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000) |
51 | 51 | ||
52 | #ifdef CONFIG_SH_STORE_QUEUES | ||
53 | /* | ||
54 | * This is a special case for the SH-4 store queues, as pages for this | ||
55 | * space still need to be faulted in before it's possible to flush the | ||
56 | * store queue cache for writeout to the remapped region. | ||
57 | */ | ||
58 | #define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) | ||
59 | #else | ||
60 | #define P3_ADDR_MAX P4SEG | ||
61 | #endif | ||
62 | |||
52 | #endif /* __KERNEL__ */ | 63 | #endif /* __KERNEL__ */ |
53 | #endif /* __ASM_SH_ADDRSPACE_H */ | 64 | #endif /* __ASM_SH_ADDRSPACE_H */ |
diff --git a/arch/sh/include/asm/bitops-grb.h b/arch/sh/include/asm/bitops-grb.h index a5907b94395..e73af33acbf 100644 --- a/arch/sh/include/asm/bitops-grb.h +++ b/arch/sh/include/asm/bitops-grb.h | |||
@@ -166,4 +166,7 @@ static inline int test_and_change_bit(int nr, volatile void * addr) | |||
166 | 166 | ||
167 | return retval; | 167 | return retval; |
168 | } | 168 | } |
169 | |||
170 | #include <asm-generic/bitops/non-atomic.h> | ||
171 | |||
169 | #endif /* __ASM_SH_BITOPS_GRB_H */ | 172 | #endif /* __ASM_SH_BITOPS_GRB_H */ |
diff --git a/arch/sh/include/asm/bitops-irq.h b/arch/sh/include/asm/bitops-irq.h deleted file mode 100644 index 653a1275058..00000000000 --- a/arch/sh/include/asm/bitops-irq.h +++ /dev/null | |||
@@ -1,91 +0,0 @@ | |||
1 | #ifndef __ASM_SH_BITOPS_IRQ_H | ||
2 | #define __ASM_SH_BITOPS_IRQ_H | ||
3 | |||
4 | static inline void set_bit(int nr, volatile void *addr) | ||
5 | { | ||
6 | int mask; | ||
7 | volatile unsigned int *a = addr; | ||
8 | unsigned long flags; | ||
9 | |||
10 | a += nr >> 5; | ||
11 | mask = 1 << (nr & 0x1f); | ||
12 | local_irq_save(flags); | ||
13 | *a |= mask; | ||
14 | local_irq_restore(flags); | ||
15 | } | ||
16 | |||
17 | static inline void clear_bit(int nr, volatile void *addr) | ||
18 | { | ||
19 | int mask; | ||
20 | volatile unsigned int *a = addr; | ||
21 | unsigned long flags; | ||
22 | |||
23 | a += nr >> 5; | ||
24 | mask = 1 << (nr & 0x1f); | ||
25 | local_irq_save(flags); | ||
26 | *a &= ~mask; | ||
27 | local_irq_restore(flags); | ||
28 | } | ||
29 | |||
30 | static inline void change_bit(int nr, volatile void *addr) | ||
31 | { | ||
32 | int mask; | ||
33 | volatile unsigned int *a = addr; | ||
34 | unsigned long flags; | ||
35 | |||
36 | a += nr >> 5; | ||
37 | mask = 1 << (nr & 0x1f); | ||
38 | local_irq_save(flags); | ||
39 | *a ^= mask; | ||
40 | local_irq_restore(flags); | ||
41 | } | ||
42 | |||
43 | static inline int test_and_set_bit(int nr, volatile void *addr) | ||
44 | { | ||
45 | int mask, retval; | ||
46 | volatile unsigned int *a = addr; | ||
47 | unsigned long flags; | ||
48 | |||
49 | a += nr >> 5; | ||
50 | mask = 1 << (nr & 0x1f); | ||
51 | local_irq_save(flags); | ||
52 | retval = (mask & *a) != 0; | ||
53 | *a |= mask; | ||
54 | local_irq_restore(flags); | ||
55 | |||
56 | return retval; | ||
57 | } | ||
58 | |||
59 | static inline int test_and_clear_bit(int nr, volatile void *addr) | ||
60 | { | ||
61 | int mask, retval; | ||
62 | volatile unsigned int *a = addr; | ||
63 | unsigned long flags; | ||
64 | |||
65 | a += nr >> 5; | ||
66 | mask = 1 << (nr & 0x1f); | ||
67 | local_irq_save(flags); | ||
68 | retval = (mask & *a) != 0; | ||
69 | *a &= ~mask; | ||
70 | local_irq_restore(flags); | ||
71 | |||
72 | return retval; | ||
73 | } | ||
74 | |||
75 | static inline int test_and_change_bit(int nr, volatile void *addr) | ||
76 | { | ||
77 | int mask, retval; | ||
78 | volatile unsigned int *a = addr; | ||
79 | unsigned long flags; | ||
80 | |||
81 | a += nr >> 5; | ||
82 | mask = 1 << (nr & 0x1f); | ||
83 | local_irq_save(flags); | ||
84 | retval = (mask & *a) != 0; | ||
85 | *a ^= mask; | ||
86 | local_irq_restore(flags); | ||
87 | |||
88 | return retval; | ||
89 | } | ||
90 | |||
91 | #endif /* __ASM_SH_BITOPS_IRQ_H */ | ||
diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h index 43b8e1a8239..1d2fc0b010a 100644 --- a/arch/sh/include/asm/bitops-llsc.h +++ b/arch/sh/include/asm/bitops-llsc.h | |||
@@ -141,4 +141,6 @@ static inline int test_and_change_bit(int nr, volatile void * addr) | |||
141 | return retval != 0; | 141 | return retval != 0; |
142 | } | 142 | } |
143 | 143 | ||
144 | #include <asm-generic/bitops/non-atomic.h> | ||
145 | |||
144 | #endif /* __ASM_SH_BITOPS_LLSC_H */ | 146 | #endif /* __ASM_SH_BITOPS_LLSC_H */ |
diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h new file mode 100644 index 00000000000..f0ae7e9218e --- /dev/null +++ b/arch/sh/include/asm/bitops-op32.h | |||
@@ -0,0 +1,142 @@ | |||
1 | #ifndef __ASM_SH_BITOPS_OP32_H | ||
2 | #define __ASM_SH_BITOPS_OP32_H | ||
3 | |||
4 | /* | ||
5 | * The bit modifying instructions on SH-2A are only capable of working | ||
6 | * with a 3-bit immediate, which signifies the shift position for the bit | ||
7 | * being worked on. | ||
8 | */ | ||
9 | #if defined(__BIG_ENDIAN) | ||
10 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) | ||
11 | #define BYTE_NUMBER(nr) ((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE) | ||
12 | #define BYTE_OFFSET(nr) ((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE) | ||
13 | #else | ||
14 | #define BYTE_NUMBER(nr) ((nr) / BITS_PER_BYTE) | ||
15 | #define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE) | ||
16 | #endif | ||
17 | |||
18 | #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) | ||
19 | |||
20 | static inline void __set_bit(int nr, volatile unsigned long *addr) | ||
21 | { | ||
22 | if (IS_IMMEDIATE(nr)) { | ||
23 | __asm__ __volatile__ ( | ||
24 | "bset.b %1, @(%O2,%0) ! __set_bit\n\t" | ||
25 | : "+r" (addr) | ||
26 | : "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr)) | ||
27 | : "t", "memory" | ||
28 | ); | ||
29 | } else { | ||
30 | unsigned long mask = BIT_MASK(nr); | ||
31 | unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); | ||
32 | |||
33 | *p |= mask; | ||
34 | } | ||
35 | } | ||
36 | |||
37 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | ||
38 | { | ||
39 | if (IS_IMMEDIATE(nr)) { | ||
40 | __asm__ __volatile__ ( | ||
41 | "bclr.b %1, @(%O2,%0) ! __clear_bit\n\t" | ||
42 | : "+r" (addr) | ||
43 | : "i" (BYTE_OFFSET(nr)), | ||
44 | "i" (BYTE_NUMBER(nr)) | ||
45 | : "t", "memory" | ||
46 | ); | ||
47 | } else { | ||
48 | unsigned long mask = BIT_MASK(nr); | ||
49 | unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); | ||
50 | |||
51 | *p &= ~mask; | ||
52 | } | ||
53 | } | ||
54 | |||
55 | /** | ||
56 | * __change_bit - Toggle a bit in memory | ||
57 | * @nr: the bit to change | ||
58 | * @addr: the address to start counting from | ||
59 | * | ||
60 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
61 | * If it's called on the same region of memory simultaneously, the effect | ||
62 | * may be that only one operation succeeds. | ||
63 | */ | ||
64 | static inline void __change_bit(int nr, volatile unsigned long *addr) | ||
65 | { | ||
66 | if (IS_IMMEDIATE(nr)) { | ||
67 | __asm__ __volatile__ ( | ||
68 | "bxor.b %1, @(%O2,%0) ! __change_bit\n\t" | ||
69 | : "+r" (addr) | ||
70 | : "i" (BYTE_OFFSET(nr)), | ||
71 | "i" (BYTE_NUMBER(nr)) | ||
72 | : "t", "memory" | ||
73 | ); | ||
74 | } else { | ||
75 | unsigned long mask = BIT_MASK(nr); | ||
76 | unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); | ||
77 | |||
78 | *p ^= mask; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * __test_and_set_bit - Set a bit and return its old value | ||
84 | * @nr: Bit to set | ||
85 | * @addr: Address to count from | ||
86 | * | ||
87 | * This operation is non-atomic and can be reordered. | ||
88 | * If two examples of this operation race, one can appear to succeed | ||
89 | * but actually fail. You must protect multiple accesses with a lock. | ||
90 | */ | ||
91 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
92 | { | ||
93 | unsigned long mask = BIT_MASK(nr); | ||
94 | unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); | ||
95 | unsigned long old = *p; | ||
96 | |||
97 | *p = old | mask; | ||
98 | return (old & mask) != 0; | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * __test_and_clear_bit - Clear a bit and return its old value | ||
103 | * @nr: Bit to clear | ||
104 | * @addr: Address to count from | ||
105 | * | ||
106 | * This operation is non-atomic and can be reordered. | ||
107 | * If two examples of this operation race, one can appear to succeed | ||
108 | * but actually fail. You must protect multiple accesses with a lock. | ||
109 | */ | ||
110 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
111 | { | ||
112 | unsigned long mask = BIT_MASK(nr); | ||
113 | unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); | ||
114 | unsigned long old = *p; | ||
115 | |||
116 | *p = old & ~mask; | ||
117 | return (old & mask) != 0; | ||
118 | } | ||
119 | |||
120 | /* WARNING: non atomic and it can be reordered! */ | ||
121 | static inline int __test_and_change_bit(int nr, | ||
122 | volatile unsigned long *addr) | ||
123 | { | ||
124 | unsigned long mask = BIT_MASK(nr); | ||
125 | unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); | ||
126 | unsigned long old = *p; | ||
127 | |||
128 | *p = old ^ mask; | ||
129 | return (old & mask) != 0; | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * test_bit - Determine whether a bit is set | ||
134 | * @nr: bit number to test | ||
135 | * @addr: Address to start counting from | ||
136 | */ | ||
137 | static inline int test_bit(int nr, const volatile unsigned long *addr) | ||
138 | { | ||
139 | return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); | ||
140 | } | ||
141 | |||
142 | #endif /* __ASM_SH_BITOPS_OP32_H */ | ||
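The new bitops-op32.h uses the SH-2A bset.b/bclr.b/bxor.b instructions only when the bit number is a compile-time constant, because the encoding carries just a 3-bit bit position plus a byte displacement. The host-side sketch below (plain C, not kernel code) shows how a constant nr decomposes for the byte-wide instruction form versus the generic BIT_WORD/BIT_MASK fallback used for a variable nr; only the little-endian case is shown and the value of nr is just an example.

/* Host-side illustration of the bit-number decomposition used above. */
#include <stdio.h>

#define BITS_PER_BYTE	8
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

#define BYTE_NUMBER(nr)	((nr) / BITS_PER_BYTE)	/* byte holding the bit */
#define BYTE_OFFSET(nr)	((nr) % BITS_PER_BYTE)	/* 0..7, fits a 3-bit immediate */

#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)	/* generic fallback: word index */
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

int main(void)
{
	int nr = 21;	/* example bit number */

	printf("bset.b form: byte %d, bit %d\n", BYTE_NUMBER(nr), BYTE_OFFSET(nr));
	printf("fallback:    word %d, mask 0x%lx\n", BIT_WORD(nr), BIT_MASK(nr));
	return 0;
}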
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 367930d8e5a..ebe595b7ab1 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h | |||
@@ -13,21 +13,22 @@ | |||
13 | 13 | ||
14 | #ifdef CONFIG_GUSA_RB | 14 | #ifdef CONFIG_GUSA_RB |
15 | #include <asm/bitops-grb.h> | 15 | #include <asm/bitops-grb.h> |
16 | #elif defined(CONFIG_CPU_SH2A) | ||
17 | #include <asm-generic/bitops/atomic.h> | ||
18 | #include <asm/bitops-op32.h> | ||
16 | #elif defined(CONFIG_CPU_SH4A) | 19 | #elif defined(CONFIG_CPU_SH4A) |
17 | #include <asm/bitops-llsc.h> | 20 | #include <asm/bitops-llsc.h> |
18 | #else | 21 | #else |
19 | #include <asm/bitops-irq.h> | 22 | #include <asm-generic/bitops/atomic.h> |
23 | #include <asm-generic/bitops/non-atomic.h> | ||
20 | #endif | 24 | #endif |
21 | 25 | ||
22 | |||
23 | /* | 26 | /* |
24 | * clear_bit() doesn't provide any barrier for the compiler. | 27 | * clear_bit() doesn't provide any barrier for the compiler. |
25 | */ | 28 | */ |
26 | #define smp_mb__before_clear_bit() barrier() | 29 | #define smp_mb__before_clear_bit() barrier() |
27 | #define smp_mb__after_clear_bit() barrier() | 30 | #define smp_mb__after_clear_bit() barrier() |
28 | 31 | ||
29 | #include <asm-generic/bitops/non-atomic.h> | ||
30 | |||
31 | #ifdef CONFIG_SUPERH32 | 32 | #ifdef CONFIG_SUPERH32 |
32 | static inline unsigned long ffz(unsigned long word) | 33 | static inline unsigned long ffz(unsigned long word) |
33 | { | 34 | { |
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 121b2ecddfc..4924ff6f543 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h | |||
@@ -25,7 +25,7 @@ static void __init check_bugs(void) | |||
25 | case CPU_SH7619: | 25 | case CPU_SH7619: |
26 | *p++ = '2'; | 26 | *p++ = '2'; |
27 | break; | 27 | break; |
28 | case CPU_SH7203 ... CPU_MXG: | 28 | case CPU_SH7201 ... CPU_MXG: |
29 | *p++ = '2'; | 29 | *p++ = '2'; |
30 | *p++ = 'a'; | 30 | *p++ = 'a'; |
31 | break; | 31 | break; |
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 9eb9036a1bd..ccb1d93bb04 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h | |||
@@ -108,13 +108,11 @@ typedef struct user_fpu_struct elf_fpregset_t; | |||
108 | #define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC) | 108 | #define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC) |
109 | #define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC) | 109 | #define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC) |
110 | 110 | ||
111 | #ifdef CONFIG_SUPERH32 | ||
112 | /* | 111 | /* |
113 | * Enable dump using regset. | 112 | * Enable dump using regset. |
114 | * This covers all of general/DSP/FPU regs. | 113 | * This covers all of general/DSP/FPU regs. |
115 | */ | 114 | */ |
116 | #define CORE_DUMP_USE_REGSET | 115 | #define CORE_DUMP_USE_REGSET |
117 | #endif | ||
118 | 116 | ||
119 | #define USE_ELF_CORE_DUMP | 117 | #define USE_ELF_CORE_DUMP |
120 | #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC | 118 | #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC |
@@ -204,7 +202,7 @@ do { \ | |||
204 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | 202 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES |
205 | struct linux_binprm; | 203 | struct linux_binprm; |
206 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | 204 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, |
207 | int executable_stack); | 205 | int uses_interp); |
208 | 206 | ||
209 | extern unsigned int vdso_enabled; | 207 | extern unsigned int vdso_enabled; |
210 | extern void __kernel_vsyscall; | 208 | extern void __kernel_vsyscall; |
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 3aed362c946..8fea7d8c825 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h | |||
@@ -1,8 +1,34 @@ | |||
1 | #ifndef __ASM_SH_FTRACE_H | 1 | #ifndef __ASM_SH_FTRACE_H |
2 | #define __ASM_SH_FTRACE_H | 2 | #define __ASM_SH_FTRACE_H |
3 | 3 | ||
4 | #ifdef CONFIG_FUNCTION_TRACER | ||
5 | |||
6 | #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ | ||
7 | |||
4 | #ifndef __ASSEMBLY__ | 8 | #ifndef __ASSEMBLY__ |
5 | extern void mcount(void); | 9 | extern void mcount(void); |
6 | #endif | 10 | |
11 | #define MCOUNT_ADDR ((long)(mcount)) | ||
12 | |||
13 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
14 | #define CALLER_ADDR ((long)(ftrace_caller)) | ||
15 | #define STUB_ADDR ((long)(ftrace_stub)) | ||
16 | |||
17 | #define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALLER_ADDR) >> 1) | ||
18 | |||
19 | struct dyn_arch_ftrace { | ||
20 | /* No extra data needed on sh */ | ||
21 | }; | ||
22 | |||
23 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
24 | |||
25 | static inline unsigned long ftrace_call_adjust(unsigned long addr) | ||
26 | { | ||
27 | /* 'addr' is the memory table address. */ | ||
28 | return addr; | ||
29 | } | ||
30 | |||
31 | #endif /* __ASSEMBLY__ */ | ||
32 | #endif /* CONFIG_FUNCTION_TRACER */ | ||
7 | 33 | ||
8 | #endif /* __ASM_SH_FTRACE_H */ | 34 | #endif /* __ASM_SH_FTRACE_H */ |
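MCOUNT_INSN_OFFSET above is the distance from ftrace_caller to ftrace_stub shifted right by one; the shift presumably converts a byte distance into 2-byte SuperH instruction slots. A minimal sketch of that arithmetic follows, with made-up addresses purely to exercise it.

/* Illustration only: byte distance between two code addresses expressed
 * in 2-byte instruction units, as MCOUNT_INSN_OFFSET does above. */
#include <stdio.h>

static unsigned long insn_offset(unsigned long stub, unsigned long caller)
{
	return (stub - caller) >> 1;	/* bytes -> 16-bit instruction units */
}

int main(void)
{
	unsigned long caller = 0x8c0010a0UL;	/* hypothetical ftrace_caller */
	unsigned long stub   = 0x8c0010c8UL;	/* hypothetical ftrace_stub */

	printf("offset = %lu instructions\n", insn_offset(stub, caller));
	return 0;
}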
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 65eaae34e75..61f6dae4053 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h | |||
@@ -260,6 +260,10 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) | |||
260 | 260 | ||
261 | return (void __iomem *)P2SEGADDR(offset); | 261 | return (void __iomem *)P2SEGADDR(offset); |
262 | } | 262 | } |
263 | |||
264 | /* P4 above the store queues are always mapped. */ | ||
265 | if (unlikely(offset >= P3_ADDR_MAX)) | ||
266 | return (void __iomem *)P4SEGADDR(offset); | ||
263 | #endif | 267 | #endif |
264 | 268 | ||
265 | return __ioremap(offset, size, flags); | 269 | return __ioremap(offset, size, flags); |
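The new branch in __ioremap_mode() pairs with the P3_ADDR_MAX definition added to addrspace.h earlier in this diff: anything at or above P3_ADDR_MAX already lives in the always-mapped P4 area, so ioremap can hand back the P4 alias without building a page-table mapping, while the store-queue window (when configured) is excluded because its pages still have to be faulted in. Below is a user-space sketch of that check; the segment constants are assumed values for illustration only.

/* User-space sketch, not kernel code.  Assumed layout: P4 starts at
 * 0xe0000000 with the 64 MiB store-queue window at its base. */
#include <stdio.h>

#define P4SEG		0xe0000000UL		/* assumed */
#define P4SEG_STORE_QUE	P4SEG			/* assumed */

#ifdef SH_STORE_QUEUES
/* Store-queue pages still need to be faulted in before use, so only
 * the P4 space above that window counts as always mapped. */
#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000UL)
#else
#define P3_ADDR_MAX	P4SEG
#endif

static int can_use_p4_alias(unsigned long offset)
{
	/* Mirrors the new check in __ioremap_mode(): at or above
	 * P3_ADDR_MAX the address is always mapped, so no page-table
	 * mapping is required. */
	return offset >= P3_ADDR_MAX;
}

int main(void)
{
	printf("0xe8000000 -> P4 alias? %d\n", can_use_p4_alias(0xe8000000UL));
	printf("0x18000000 -> P4 alias? %d\n", can_use_p4_alias(0x18000000UL));
	return 0;
}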
diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h index 24e42078f36..72704ed725e 100644 --- a/arch/sh/include/asm/kgdb.h +++ b/arch/sh/include/asm/kgdb.h | |||
@@ -1,21 +1,7 @@ | |||
1 | /* | 1 | #ifndef __ASM_SH_KGDB_H |
2 | * May be copied or modified under the terms of the GNU General Public | 2 | #define __ASM_SH_KGDB_H |
3 | * License. See linux/COPYING for more information. | ||
4 | * | ||
5 | * Based on original code by Glenn Engel, Jim Kingdon, | ||
6 | * David Grothe <dave@gcom.com>, Tigran Aivazian, <tigran@sco.com> and | ||
7 | * Amit S. Kale <akale@veritas.com> | ||
8 | * | ||
9 | * Super-H port based on sh-stub.c (Ben Lee and Steve Chamberlain) by | ||
10 | * Henry Bell <henry.bell@st.com> | ||
11 | * | ||
12 | * Header file for low-level support for remote debug using GDB. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef __KGDB_H | ||
17 | #define __KGDB_H | ||
18 | 3 | ||
4 | #include <asm/cacheflush.h> | ||
19 | #include <asm/ptrace.h> | 5 | #include <asm/ptrace.h> |
20 | 6 | ||
21 | /* Same as pt_regs but has vbr in place of syscall_nr */ | 7 | /* Same as pt_regs but has vbr in place of syscall_nr */ |
@@ -30,40 +16,26 @@ struct kgdb_regs { | |||
30 | unsigned long vbr; | 16 | unsigned long vbr; |
31 | }; | 17 | }; |
32 | 18 | ||
33 | /* State info */ | 19 | enum regnames { |
34 | extern char kgdb_in_gdb_mode; | 20 | GDB_R0, GDB_R1, GDB_R2, GDB_R3, GDB_R4, GDB_R5, GDB_R6, GDB_R7, |
35 | extern int kgdb_nofault; /* Ignore bus errors (in gdb mem access) */ | 21 | GDB_R8, GDB_R9, GDB_R10, GDB_R11, GDB_R12, GDB_R13, GDB_R14, GDB_R15, |
36 | extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ | ||
37 | 22 | ||
38 | /* SCI */ | 23 | GDB_PC, GDB_PR, GDB_SR, GDB_GBR, GDB_MACH, GDB_MACL, GDB_VBR, |
39 | extern int kgdb_portnum; | 24 | }; |
40 | extern int kgdb_baud; | ||
41 | extern char kgdb_parity; | ||
42 | extern char kgdb_bits; | ||
43 | 25 | ||
44 | /* Init and interface stuff */ | 26 | #define NUMREGBYTES ((GDB_VBR + 1) * 4) |
45 | extern int kgdb_init(void); | ||
46 | extern int (*kgdb_getchar)(void); | ||
47 | extern void (*kgdb_putchar)(int); | ||
48 | 27 | ||
49 | /* Trap functions */ | 28 | static inline void arch_kgdb_breakpoint(void) |
50 | typedef void (kgdb_debug_hook_t)(struct pt_regs *regs); | 29 | { |
51 | typedef void (kgdb_bus_error_hook_t)(void); | 30 | __asm__ __volatile__ ("trapa #0x3c\n"); |
52 | extern kgdb_debug_hook_t *kgdb_debug_hook; | 31 | } |
53 | extern kgdb_bus_error_hook_t *kgdb_bus_err_hook; | ||
54 | 32 | ||
55 | /* Console */ | 33 | /* State info */ |
56 | struct console; | 34 | extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ |
57 | void kgdb_console_write(struct console *co, const char *s, unsigned count); | ||
58 | extern int kgdb_console_setup(struct console *, char *); | ||
59 | 35 | ||
60 | /* Prototypes for jmp fns */ | 36 | #define BUFMAX 2048 |
61 | #define _JBLEN 9 | ||
62 | typedef int jmp_buf[_JBLEN]; | ||
63 | extern void longjmp(jmp_buf __jmpb, int __retval); | ||
64 | extern int setjmp(jmp_buf __jmpb); | ||
65 | 37 | ||
66 | /* Forced breakpoint */ | 38 | #define CACHE_FLUSH_IS_SAFE 1 |
67 | #define breakpoint() __asm__ __volatile__("trapa #0x3c") | 39 | #define BREAK_INSTR_SIZE 2 |
68 | 40 | ||
69 | #endif | 41 | #endif /* __ASM_SH_KGDB_H */ |
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index f1bae02ef7b..64b1c16a0f0 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h | |||
@@ -14,8 +14,6 @@ | |||
14 | #include <linux/time.h> | 14 | #include <linux/time.h> |
15 | #include <asm/machtypes.h> | 15 | #include <asm/machtypes.h> |
16 | 16 | ||
17 | struct device; | ||
18 | |||
19 | struct sh_machine_vector { | 17 | struct sh_machine_vector { |
20 | void (*mv_setup)(char **cmdline_p); | 18 | void (*mv_setup)(char **cmdline_p); |
21 | const char *mv_name; | 19 | const char *mv_name; |
@@ -45,9 +43,6 @@ struct sh_machine_vector { | |||
45 | int (*mv_irq_demux)(int irq); | 43 | int (*mv_irq_demux)(int irq); |
46 | 44 | ||
47 | void (*mv_init_irq)(void); | 45 | void (*mv_init_irq)(void); |
48 | void (*mv_init_pci)(void); | ||
49 | |||
50 | void (*mv_heartbeat)(void); | ||
51 | 46 | ||
52 | void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); | 47 | void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); |
53 | void (*mv_ioport_unmap)(void __iomem *); | 48 | void (*mv_ioport_unmap)(void __iomem *); |
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 04c0c9733ad..5d9157bd474 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h | |||
@@ -22,7 +22,7 @@ | |||
22 | #define MMU_CONTEXT_ASID_MASK 0x000000ff | 22 | #define MMU_CONTEXT_ASID_MASK 0x000000ff |
23 | #define MMU_CONTEXT_VERSION_MASK 0xffffff00 | 23 | #define MMU_CONTEXT_VERSION_MASK 0xffffff00 |
24 | #define MMU_CONTEXT_FIRST_VERSION 0x00000100 | 24 | #define MMU_CONTEXT_FIRST_VERSION 0x00000100 |
25 | #define NO_CONTEXT 0 | 25 | #define NO_CONTEXT 0UL |
26 | 26 | ||
27 | /* ASID is 8-bit value, so it can't be 0x100 */ | 27 | /* ASID is 8-bit value, so it can't be 0x100 */ |
28 | #define MMU_NO_ASID 0x100 | 28 | #define MMU_NO_ASID 0x100 |
@@ -130,7 +130,7 @@ static inline void switch_mm(struct mm_struct *prev, | |||
130 | #define destroy_context(mm) do { } while (0) | 130 | #define destroy_context(mm) do { } while (0) |
131 | #define set_asid(asid) do { } while (0) | 131 | #define set_asid(asid) do { } while (0) |
132 | #define get_asid() (0) | 132 | #define get_asid() (0) |
133 | #define cpu_asid(cpu, mm) ({ (void)cpu; 0; }) | 133 | #define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; }) |
134 | #define switch_and_save_asid(asid) (0) | 134 | #define switch_and_save_asid(asid) (0) |
135 | #define set_TTB(pgd) do { } while (0) | 135 | #define set_TTB(pgd) do { } while (0) |
136 | #define get_TTB() (0) | 136 | #define get_TTB() (0) |
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h new file mode 100644 index 00000000000..ee839ee58ac --- /dev/null +++ b/arch/sh/include/asm/mutex-llsc.h | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * arch/sh/include/asm/mutex-llsc.h | ||
3 | * | ||
4 | * SH-4A optimized mutex locking primitives | ||
5 | * | ||
6 | * Please look into asm-generic/mutex-xchg.h for a formal definition. | ||
7 | */ | ||
8 | #ifndef __ASM_SH_MUTEX_LLSC_H | ||
9 | #define __ASM_SH_MUTEX_LLSC_H | ||
10 | |||
11 | /* | ||
12 | * Attempting to lock a mutex on SH4A is done like in ARMv6+ architecure. | ||
13 | * with a bastardized atomic decrement (it is not a reliable atomic decrement | ||
14 | * but it satisfies the defined semantics for our purpose, while being | ||
15 | * smaller and faster than a real atomic decrement or atomic swap. | ||
16 | * The idea is to attempt decrementing the lock value only once. If once | ||
17 | * decremented it isn't zero, or if its store-back fails due to a dispute | ||
18 | * on the exclusive store, we simply bail out immediately through the slow | ||
19 | * path where the lock will be reattempted until it succeeds. | ||
20 | */ | ||
21 | static inline void | ||
22 | __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) | ||
23 | { | ||
24 | int __ex_flag, __res; | ||
25 | |||
26 | __asm__ __volatile__ ( | ||
27 | "movli.l @%2, %0 \n" | ||
28 | "add #-1, %0 \n" | ||
29 | "movco.l %0, @%2 \n" | ||
30 | "movt %1 \n" | ||
31 | : "=&z" (__res), "=&r" (__ex_flag) | ||
32 | : "r" (&(count)->counter) | ||
33 | : "t"); | ||
34 | |||
35 | __res |= !__ex_flag; | ||
36 | if (unlikely(__res != 0)) | ||
37 | fail_fn(count); | ||
38 | } | ||
39 | |||
40 | static inline int | ||
41 | __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | ||
42 | { | ||
43 | int __ex_flag, __res; | ||
44 | |||
45 | __asm__ __volatile__ ( | ||
46 | "movli.l @%2, %0 \n" | ||
47 | "add #-1, %0 \n" | ||
48 | "movco.l %0, @%2 \n" | ||
49 | "movt %1 \n" | ||
50 | : "=&z" (__res), "=&r" (__ex_flag) | ||
51 | : "r" (&(count)->counter) | ||
52 | : "t"); | ||
53 | |||
54 | __res |= !__ex_flag; | ||
55 | if (unlikely(__res != 0)) | ||
56 | __res = fail_fn(count); | ||
57 | |||
58 | return __res; | ||
59 | } | ||
60 | |||
61 | static inline void | ||
62 | __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) | ||
63 | { | ||
64 | int __ex_flag, __res; | ||
65 | |||
66 | __asm__ __volatile__ ( | ||
67 | "movli.l @%2, %0 \n\t" | ||
68 | "add #1, %0 \n\t" | ||
69 | "movco.l %0, @%2 \n\t" | ||
70 | "movt %1 \n\t" | ||
71 | : "=&z" (__res), "=&r" (__ex_flag) | ||
72 | : "r" (&(count)->counter) | ||
73 | : "t"); | ||
74 | |||
75 | __res |= !__ex_flag; | ||
76 | if (unlikely(__res <= 0)) | ||
77 | fail_fn(count); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * If the unlock was done on a contended lock, or if the unlock simply fails | ||
82 | * then the mutex remains locked. | ||
83 | */ | ||
84 | #define __mutex_slowpath_needs_to_unlock() 1 | ||
85 | |||
86 | /* | ||
87 | * For __mutex_fastpath_trylock we do an atomic decrement and check the | ||
88 | * result and put it in the __res variable. | ||
89 | */ | ||
90 | static inline int | ||
91 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | ||
92 | { | ||
93 | int __res, __orig; | ||
94 | |||
95 | __asm__ __volatile__ ( | ||
96 | "1: movli.l @%2, %0 \n\t" | ||
97 | "dt %0 \n\t" | ||
98 | "movco.l %0,@%2 \n\t" | ||
99 | "bf 1b \n\t" | ||
100 | "cmp/eq #0,%0 \n\t" | ||
101 | "bt 2f \n\t" | ||
102 | "mov #0, %1 \n\t" | ||
103 | "bf 3f \n\t" | ||
104 | "2: mov #1, %1 \n\t" | ||
105 | "3: " | ||
106 | : "=&z" (__orig), "=&r" (__res) | ||
107 | : "r" (&count->counter) | ||
108 | : "t"); | ||
109 | |||
110 | return __res; | ||
111 | } | ||
112 | #endif /* __ASM_SH_MUTEX_LLSC_H */ | ||
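The fastpath above makes exactly one movli.l/movco.l attempt at decrementing the count: if the store-back is disputed or the decremented value is not zero, it immediately punts to the slow path rather than retrying. The portable sketch below expresses the same single-attempt policy with C11 atomics standing in for the LL/SC pair; it illustrates the control flow only and is not a drop-in replacement.

/* Portable sketch of the single-attempt fastpath policy. */
#include <stdatomic.h>
#include <stdio.h>

static void slowpath_lock(atomic_int *count)
{
	/* Placeholder for the generic contended-lock path. */
	printf("contended: taking the slow path\n");
}

static void fastpath_lock(atomic_int *count)
{
	int old = atomic_load(count);
	int newval = old - 1;

	/* One attempt only: a disputed store-back or a non-zero result
	 * (the lock was not free) bails out to the slow path. */
	if (!atomic_compare_exchange_strong(count, &old, newval) || newval != 0)
		slowpath_lock(count);
}

int main(void)
{
	atomic_int count = 1;	/* 1 == unlocked in the mutex fastpath protocol */

	fastpath_lock(&count);	/* uncontended: silently takes the lock */
	fastpath_lock(&count);	/* already held: falls back */
	return 0;
}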
diff --git a/arch/sh/include/asm/mutex.h b/arch/sh/include/asm/mutex.h index 458c1f7fbc1..d8e37716a4a 100644 --- a/arch/sh/include/asm/mutex.h +++ b/arch/sh/include/asm/mutex.h | |||
@@ -5,5 +5,8 @@ | |||
5 | * implementation in place, or pick the atomic_xchg() based generic | 5 | * implementation in place, or pick the atomic_xchg() based generic |
6 | * implementation. (see asm-generic/mutex-xchg.h for details) | 6 | * implementation. (see asm-generic/mutex-xchg.h for details) |
7 | */ | 7 | */ |
8 | 8 | #if defined(CONFIG_CPU_SH4A) | |
9 | #include <asm/mutex-llsc.h> | ||
10 | #else | ||
9 | #include <asm-generic/mutex-dec.h> | 11 | #include <asm-generic/mutex-dec.h> |
12 | #endif | ||
diff --git a/arch/sh/include/asm/pm.h b/arch/sh/include/asm/pm.h deleted file mode 100644 index 56fdbd6b1c9..00000000000 --- a/arch/sh/include/asm/pm.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright 2006 (c) Andriy Skulysh <askulysh@gmail.com> | ||
7 | * | ||
8 | */ | ||
9 | #ifndef __ASM_SH_PM_H | ||
10 | #define __ASM_SH_PM_H | ||
11 | |||
12 | extern u8 wakeup_start; | ||
13 | extern u8 wakeup_end; | ||
14 | |||
15 | void pm_enter(void); | ||
16 | |||
17 | #endif | ||
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 693364a20ad..1ef4b24d761 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h | |||
@@ -18,7 +18,7 @@ enum cpu_type { | |||
18 | CPU_SH7619, | 18 | CPU_SH7619, |
19 | 19 | ||
20 | /* SH-2A types */ | 20 | /* SH-2A types */ |
21 | CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG, | 21 | CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG, |
22 | 22 | ||
23 | /* SH-3 types */ | 23 | /* SH-3 types */ |
24 | CPU_SH7705, CPU_SH7706, CPU_SH7707, | 24 | CPU_SH7705, CPU_SH7706, CPU_SH7707, |
@@ -82,6 +82,9 @@ extern struct sh_cpuinfo cpu_data[]; | |||
82 | #define current_cpu_data cpu_data[smp_processor_id()] | 82 | #define current_cpu_data cpu_data[smp_processor_id()] |
83 | #define raw_current_cpu_data cpu_data[raw_smp_processor_id()] | 83 | #define raw_current_cpu_data cpu_data[raw_smp_processor_id()] |
84 | 84 | ||
85 | #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") | ||
86 | #define cpu_relax() barrier() | ||
87 | |||
85 | /* Forward decl */ | 88 | /* Forward decl */ |
86 | struct seq_operations; | 89 | struct seq_operations; |
87 | 90 | ||
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index a46a0207e97..d79063c5eb9 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h | |||
@@ -175,6 +175,15 @@ static __inline__ void enable_fpu(void) | |||
175 | 175 | ||
176 | void show_trace(struct task_struct *tsk, unsigned long *sp, | 176 | void show_trace(struct task_struct *tsk, unsigned long *sp, |
177 | struct pt_regs *regs); | 177 | struct pt_regs *regs); |
178 | |||
179 | #ifdef CONFIG_DUMP_CODE | ||
180 | void show_code(struct pt_regs *regs); | ||
181 | #else | ||
182 | static inline void show_code(struct pt_regs *regs) | ||
183 | { | ||
184 | } | ||
185 | #endif | ||
186 | |||
178 | extern unsigned long get_wchan(struct task_struct *p); | 187 | extern unsigned long get_wchan(struct task_struct *p); |
179 | 188 | ||
180 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) | 189 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) |
@@ -182,9 +191,6 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
182 | 191 | ||
183 | #define user_stack_pointer(regs) ((regs)->regs[15]) | 192 | #define user_stack_pointer(regs) ((regs)->regs[15]) |
184 | 193 | ||
185 | #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") | ||
186 | #define cpu_relax() barrier() | ||
187 | |||
188 | #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ | 194 | #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ |
189 | defined(CONFIG_CPU_SH4) | 195 | defined(CONFIG_CPU_SH4) |
190 | #define PREFETCH_STRIDE L1_CACHE_BYTES | 196 | #define PREFETCH_STRIDE L1_CACHE_BYTES |
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index b0b4824dfc4..803177fcf08 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h | |||
@@ -226,9 +226,7 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
226 | #define KSTK_EIP(tsk) ((tsk)->thread.pc) | 226 | #define KSTK_EIP(tsk) ((tsk)->thread.pc) |
227 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) | 227 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) |
228 | 228 | ||
229 | #define user_stack_pointer(regs) ((regs)->sp) | 229 | #define user_stack_pointer(regs) ((regs)->regs[15]) |
230 | |||
231 | #define cpu_relax() barrier() | ||
232 | 230 | ||
233 | #endif /* __ASSEMBLY__ */ | 231 | #endif /* __ASSEMBLY__ */ |
234 | #endif /* __ASM_SH_PROCESSOR_64_H */ | 232 | #endif /* __ASM_SH_PROCESSOR_64_H */ |
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index 3ad18e91bca..12912ab80c1 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h | |||
@@ -86,6 +86,7 @@ struct pt_dspregs { | |||
86 | unsigned long re; | 86 | unsigned long re; |
87 | unsigned long mod; | 87 | unsigned long mod; |
88 | }; | 88 | }; |
89 | #endif | ||
89 | 90 | ||
90 | #define PTRACE_GETREGS 12 /* General registers */ | 91 | #define PTRACE_GETREGS 12 /* General registers */ |
91 | #define PTRACE_SETREGS 13 | 92 | #define PTRACE_SETREGS 13 |
@@ -100,7 +101,6 @@ struct pt_dspregs { | |||
100 | 101 | ||
101 | #define PTRACE_GETDSPREGS 55 /* DSP registers */ | 102 | #define PTRACE_GETDSPREGS 55 /* DSP registers */ |
102 | #define PTRACE_SETDSPREGS 56 | 103 | #define PTRACE_SETDSPREGS 56 |
103 | #endif | ||
104 | 104 | ||
105 | #ifdef __KERNEL__ | 105 | #ifdef __KERNEL__ |
106 | #include <asm/addrspace.h> | 106 | #include <asm/addrspace.h> |
diff --git a/arch/sh/include/asm/sh_bios.h b/arch/sh/include/asm/sh_bios.h index 0ca261956e3..d9c96d7cf6c 100644 --- a/arch/sh/include/asm/sh_bios.h +++ b/arch/sh/include/asm/sh_bios.h | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | extern void sh_bios_console_write(const char *buf, unsigned int len); | 11 | extern void sh_bios_console_write(const char *buf, unsigned int len); |
12 | extern void sh_bios_char_out(char ch); | 12 | extern void sh_bios_char_out(char ch); |
13 | extern int sh_bios_in_gdb_mode(void); | ||
14 | extern void sh_bios_gdb_detach(void); | 13 | extern void sh_bios_gdb_detach(void); |
15 | 14 | ||
16 | extern void sh_bios_get_node_addr(unsigned char *node_addr); | 15 | extern void sh_bios_get_node_addr(unsigned char *node_addr); |
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index 85b660c17eb..c24e9c6a173 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h | |||
@@ -31,7 +31,7 @@ enum { | |||
31 | }; | 31 | }; |
32 | 32 | ||
33 | void smp_message_recv(unsigned int msg); | 33 | void smp_message_recv(unsigned int msg); |
34 | void smp_timer_broadcast(cpumask_t mask); | 34 | void smp_timer_broadcast(const struct cpumask *mask); |
35 | 35 | ||
36 | void local_timer_interrupt(void); | 36 | void local_timer_interrupt(void); |
37 | void local_timer_setup(unsigned int cpu); | 37 | void local_timer_setup(unsigned int cpu); |
diff --git a/arch/sh/include/asm/string_64.h b/arch/sh/include/asm/string_64.h index aa1fef229c7..74200717262 100644 --- a/arch/sh/include/asm/string_64.h +++ b/arch/sh/include/asm/string_64.h | |||
@@ -1,17 +1,20 @@ | |||
1 | #ifndef __ASM_SH_STRING_64_H | 1 | #ifndef __ASM_SH_STRING_64_H |
2 | #define __ASM_SH_STRING_64_H | 2 | #define __ASM_SH_STRING_64_H |
3 | 3 | ||
4 | /* | 4 | #ifdef __KERNEL__ |
5 | * include/asm-sh/string_64.h | 5 | |
6 | * | 6 | #define __HAVE_ARCH_MEMSET |
7 | * Copyright (C) 2000, 2001 Paolo Alberelli | 7 | extern void *memset(void *__s, int __c, size_t __count); |
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | ||
13 | 8 | ||
14 | #define __HAVE_ARCH_MEMCPY | 9 | #define __HAVE_ARCH_MEMCPY |
15 | extern void *memcpy(void *dest, const void *src, size_t count); | 10 | extern void *memcpy(void *dest, const void *src, size_t count); |
16 | 11 | ||
12 | #define __HAVE_ARCH_STRLEN | ||
13 | extern size_t strlen(const char *); | ||
14 | |||
15 | #define __HAVE_ARCH_STRCPY | ||
16 | extern char *strcpy(char *__dest, const char *__src); | ||
17 | |||
18 | #endif /* __KERNEL__ */ | ||
19 | |||
17 | #endif /* __ASM_SH_STRING_64_H */ | 20 | #endif /* __ASM_SH_STRING_64_H */ |
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 54773f26cd4..05a868a71ef 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <linux/sched.h> | 5 | #include <linux/sched.h> |
6 | #include <asm/ptrace.h> | 6 | #include <asm/ptrace.h> |
7 | 7 | ||
8 | /* The system call number is given by the user in %g1 */ | 8 | /* The system call number is given by the user in R3 */ |
9 | static inline long syscall_get_nr(struct task_struct *task, | 9 | static inline long syscall_get_nr(struct task_struct *task, |
10 | struct pt_regs *regs) | 10 | struct pt_regs *regs) |
11 | { | 11 | { |
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index bcaaa8ca4d7..e1143b9784d 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h | |||
@@ -1,6 +1,80 @@ | |||
1 | #ifndef __ASM_SH_SYSCALL_64_H | 1 | #ifndef __ASM_SH_SYSCALL_64_H |
2 | #define __ASM_SH_SYSCALL_64_H | 2 | #define __ASM_SH_SYSCALL_64_H |
3 | 3 | ||
4 | #include <asm-generic/syscall.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/sched.h> | ||
6 | #include <asm/ptrace.h> | ||
7 | |||
8 | /* The system call number is given by the user in R9 */ | ||
9 | static inline long syscall_get_nr(struct task_struct *task, | ||
10 | struct pt_regs *regs) | ||
11 | { | ||
12 | return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L; | ||
13 | } | ||
14 | |||
15 | static inline void syscall_rollback(struct task_struct *task, | ||
16 | struct pt_regs *regs) | ||
17 | { | ||
18 | /* | ||
19 | * XXX: This needs some thought. On SH we don't | ||
20 | * save away the original R9 value anywhere. | ||
21 | */ | ||
22 | } | ||
23 | |||
24 | static inline bool syscall_has_error(struct pt_regs *regs) | ||
25 | { | ||
26 | return (regs->sr & 0x1) ? true : false; | ||
27 | } | ||
28 | static inline void syscall_set_error(struct pt_regs *regs) | ||
29 | { | ||
30 | regs->sr |= 0x1; | ||
31 | } | ||
32 | static inline void syscall_clear_error(struct pt_regs *regs) | ||
33 | { | ||
34 | regs->sr &= ~0x1; | ||
35 | } | ||
36 | |||
37 | static inline long syscall_get_error(struct task_struct *task, | ||
38 | struct pt_regs *regs) | ||
39 | { | ||
40 | return syscall_has_error(regs) ? regs->regs[9] : 0; | ||
41 | } | ||
42 | |||
43 | static inline long syscall_get_return_value(struct task_struct *task, | ||
44 | struct pt_regs *regs) | ||
45 | { | ||
46 | return regs->regs[9]; | ||
47 | } | ||
48 | |||
49 | static inline void syscall_set_return_value(struct task_struct *task, | ||
50 | struct pt_regs *regs, | ||
51 | int error, long val) | ||
52 | { | ||
53 | if (error) { | ||
54 | syscall_set_error(regs); | ||
55 | regs->regs[9] = -error; | ||
56 | } else { | ||
57 | syscall_clear_error(regs); | ||
58 | regs->regs[9] = val; | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static inline void syscall_get_arguments(struct task_struct *task, | ||
63 | struct pt_regs *regs, | ||
64 | unsigned int i, unsigned int n, | ||
65 | unsigned long *args) | ||
66 | { | ||
67 | BUG_ON(i + n > 6); | ||
68 | memcpy(args, ®s->regs[2 + i], n * sizeof(args[0])); | ||
69 | } | ||
70 | |||
71 | static inline void syscall_set_arguments(struct task_struct *task, | ||
72 | struct pt_regs *regs, | ||
73 | unsigned int i, unsigned int n, | ||
74 | const unsigned long *args) | ||
75 | { | ||
76 | BUG_ON(i + n > 6); | ||
77 | memcpy(®s->regs[2 + i], args, n * sizeof(args[0])); | ||
78 | } | ||
5 | 79 | ||
6 | #endif /* __ASM_SH_SYSCALL_64_H */ | 80 | #endif /* __ASM_SH_SYSCALL_64_H */ |
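The helpers above encode the sh64 syscall register conventions: the number and return value travel in R9, bit 0 of SR flags an error return, and the six arguments occupy R2 through R7. The fragment below mirrors syscall_get_arguments() against a stand-in register structure; the struct layout and values are invented for illustration and are not the real sh64 pt_regs.

/* Stand-in illustration of the register conventions encoded above. */
#include <stdio.h>
#include <string.h>

struct fake_regs {
	long syscall_nr;
	unsigned long sr;
	unsigned long regs[63];
};

static void get_args(struct fake_regs *regs, unsigned int i, unsigned int n,
		     unsigned long *args)
{
	/* Arguments live in R2..R7, copied exactly as
	 * syscall_get_arguments() does above. */
	memcpy(args, &regs->regs[2 + i], n * sizeof(args[0]));
}

int main(void)
{
	struct fake_regs r = { .syscall_nr = 1, .sr = 0 };
	unsigned long args[3];

	r.regs[9] = 64;		/* syscall number (and later return value) in R9 */
	r.regs[2] = 1;		/* first argument */
	r.regs[3] = 0x1000;	/* second argument */
	r.regs[4] = 42;		/* third argument */

	get_args(&r, 0, 3, args);
	printf("nr=%lu args=%lu 0x%lx %lu error=%lu\n",
	       r.regs[9], args[0], args[1], args[2], r.sr & 1);
	return 0;
}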
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index 6160fe44516..c9ec6af8e74 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h | |||
@@ -175,6 +175,8 @@ asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs) | |||
175 | BUILD_TRAP_HANDLER(address_error); | 175 | BUILD_TRAP_HANDLER(address_error); |
176 | BUILD_TRAP_HANDLER(debug); | 176 | BUILD_TRAP_HANDLER(debug); |
177 | BUILD_TRAP_HANDLER(bug); | 177 | BUILD_TRAP_HANDLER(bug); |
178 | BUILD_TRAP_HANDLER(breakpoint); | ||
179 | BUILD_TRAP_HANDLER(singlestep); | ||
178 | BUILD_TRAP_HANDLER(fpu_error); | 180 | BUILD_TRAP_HANDLER(fpu_error); |
179 | BUILD_TRAP_HANDLER(fpu_state_restore); | 181 | BUILD_TRAP_HANDLER(fpu_state_restore); |
180 | 182 | ||
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h index 95f0085e098..066f0fba590 100644 --- a/arch/sh/include/asm/topology.h +++ b/arch/sh/include/asm/topology.h | |||
@@ -5,7 +5,6 @@ | |||
5 | 5 | ||
6 | /* sched_domains SD_NODE_INIT for sh machines */ | 6 | /* sched_domains SD_NODE_INIT for sh machines */ |
7 | #define SD_NODE_INIT (struct sched_domain) { \ | 7 | #define SD_NODE_INIT (struct sched_domain) { \ |
8 | .span = CPU_MASK_NONE, \ | ||
9 | .parent = NULL, \ | 8 | .parent = NULL, \ |
10 | .child = NULL, \ | 9 | .child = NULL, \ |
11 | .groups = NULL, \ | 10 | .groups = NULL, \ |
@@ -33,6 +32,7 @@ | |||
33 | #define parent_node(node) ((void)(node),0) | 32 | #define parent_node(node) ((void)(node),0) |
34 | 33 | ||
35 | #define node_to_cpumask(node) ((void)node, cpu_online_map) | 34 | #define node_to_cpumask(node) ((void)node, cpu_online_map) |
35 | #define cpumask_of_node(node) ((void)node, cpu_online_mask) | ||
36 | #define node_to_first_cpu(node) ((void)(node),0) | 36 | #define node_to_first_cpu(node) ((void)(node),0) |
37 | 37 | ||
38 | #define pcibus_to_node(bus) ((void)(bus), -1) | 38 | #define pcibus_to_node(bus) ((void)(bus), -1) |
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h new file mode 100644 index 00000000000..d8f89770275 --- /dev/null +++ b/arch/sh/include/asm/unaligned-sh4a.h | |||
@@ -0,0 +1,258 @@ | |||
1 | #ifndef __ASM_SH_UNALIGNED_SH4A_H | ||
2 | #define __ASM_SH_UNALIGNED_SH4A_H | ||
3 | |||
4 | /* | ||
5 | * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only. | ||
6 | * Support for 16 and 64-bit accesses are done through shifting and | ||
7 | * masking relative to the endianness. Unaligned stores are not supported | ||
8 | * by the instruction encoding, so these continue to use the packed | ||
9 | * struct. | ||
10 | * | ||
11 | * The same note as with the movli.l/movco.l pair applies here, as long | ||
12 | * as the load is gauranteed to be inlined, nothing else will hook in to | ||
13 | * r0 and we get the return value for free. | ||
14 | * | ||
15 | * NOTE: Due to the fact we require r0 encoding, care should be taken to | ||
16 | * avoid mixing these heavily with other r0 consumers, such as the atomic | ||
17 | * ops. Failure to adhere to this can result in the compiler running out | ||
18 | * of spill registers and blowing up when building at low optimization | ||
19 | * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777. | ||
20 | */ | ||
21 | #include <linux/types.h> | ||
22 | #include <asm/byteorder.h> | ||
23 | |||
24 | static __always_inline u32 __get_unaligned_cpu32(const u8 *p) | ||
25 | { | ||
26 | unsigned long unaligned; | ||
27 | |||
28 | __asm__ __volatile__ ( | ||
29 | "movua.l @%1, %0\n\t" | ||
30 | : "=z" (unaligned) | ||
31 | : "r" (p) | ||
32 | ); | ||
33 | |||
34 | return unaligned; | ||
35 | } | ||
36 | |||
37 | struct __una_u16 { u16 x __attribute__((packed)); }; | ||
38 | struct __una_u32 { u32 x __attribute__((packed)); }; | ||
39 | struct __una_u64 { u64 x __attribute__((packed)); }; | ||
40 | |||
41 | static inline u16 __get_unaligned_cpu16(const u8 *p) | ||
42 | { | ||
43 | #ifdef __LITTLE_ENDIAN | ||
44 | return __get_unaligned_cpu32(p) & 0xffff; | ||
45 | #else | ||
46 | return __get_unaligned_cpu32(p) >> 16; | ||
47 | #endif | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Even though movua.l supports auto-increment on the read side, it can | ||
52 | * only store to r0 due to instruction encoding constraints, so just let | ||
53 | * the compiler sort it out on its own. | ||
54 | */ | ||
55 | static inline u64 __get_unaligned_cpu64(const u8 *p) | ||
56 | { | ||
57 | #ifdef __LITTLE_ENDIAN | ||
58 | return (u64)__get_unaligned_cpu32(p + 4) << 32 | | ||
59 | __get_unaligned_cpu32(p); | ||
60 | #else | ||
61 | return (u64)__get_unaligned_cpu32(p) << 32 | | ||
62 | __get_unaligned_cpu32(p + 4); | ||
63 | #endif | ||
64 | } | ||
65 | |||
66 | static inline u16 get_unaligned_le16(const void *p) | ||
67 | { | ||
68 | return le16_to_cpu(__get_unaligned_cpu16(p)); | ||
69 | } | ||
70 | |||
71 | static inline u32 get_unaligned_le32(const void *p) | ||
72 | { | ||
73 | return le32_to_cpu(__get_unaligned_cpu32(p)); | ||
74 | } | ||
75 | |||
76 | static inline u64 get_unaligned_le64(const void *p) | ||
77 | { | ||
78 | return le64_to_cpu(__get_unaligned_cpu64(p)); | ||
79 | } | ||
80 | |||
81 | static inline u16 get_unaligned_be16(const void *p) | ||
82 | { | ||
83 | return be16_to_cpu(__get_unaligned_cpu16(p)); | ||
84 | } | ||
85 | |||
86 | static inline u32 get_unaligned_be32(const void *p) | ||
87 | { | ||
88 | return be32_to_cpu(__get_unaligned_cpu32(p)); | ||
89 | } | ||
90 | |||
91 | static inline u64 get_unaligned_be64(const void *p) | ||
92 | { | ||
93 | return be64_to_cpu(__get_unaligned_cpu64(p)); | ||
94 | } | ||
95 | |||
96 | static inline void __put_le16_noalign(u8 *p, u16 val) | ||
97 | { | ||
98 | *p++ = val; | ||
99 | *p++ = val >> 8; | ||
100 | } | ||
101 | |||
102 | static inline void __put_le32_noalign(u8 *p, u32 val) | ||
103 | { | ||
104 | __put_le16_noalign(p, val); | ||
105 | __put_le16_noalign(p + 2, val >> 16); | ||
106 | } | ||
107 | |||
108 | static inline void __put_le64_noalign(u8 *p, u64 val) | ||
109 | { | ||
110 | __put_le32_noalign(p, val); | ||
111 | __put_le32_noalign(p + 4, val >> 32); | ||
112 | } | ||
113 | |||
114 | static inline void __put_be16_noalign(u8 *p, u16 val) | ||
115 | { | ||
116 | *p++ = val >> 8; | ||
117 | *p++ = val; | ||
118 | } | ||
119 | |||
120 | static inline void __put_be32_noalign(u8 *p, u32 val) | ||
121 | { | ||
122 | __put_be16_noalign(p, val >> 16); | ||
123 | __put_be16_noalign(p + 2, val); | ||
124 | } | ||
125 | |||
126 | static inline void __put_be64_noalign(u8 *p, u64 val) | ||
127 | { | ||
128 | __put_be32_noalign(p, val >> 32); | ||
129 | __put_be32_noalign(p + 4, val); | ||
130 | } | ||
131 | |||
132 | static inline void put_unaligned_le16(u16 val, void *p) | ||
133 | { | ||
134 | #ifdef __LITTLE_ENDIAN | ||
135 | ((struct __una_u16 *)p)->x = val; | ||
136 | #else | ||
137 | __put_le16_noalign(p, val); | ||
138 | #endif | ||
139 | } | ||
140 | |||
141 | static inline void put_unaligned_le32(u32 val, void *p) | ||
142 | { | ||
143 | #ifdef __LITTLE_ENDIAN | ||
144 | ((struct __una_u32 *)p)->x = val; | ||
145 | #else | ||
146 | __put_le32_noalign(p, val); | ||
147 | #endif | ||
148 | } | ||
149 | |||
150 | static inline void put_unaligned_le64(u64 val, void *p) | ||
151 | { | ||
152 | #ifdef __LITTLE_ENDIAN | ||
153 | ((struct __una_u64 *)p)->x = val; | ||
154 | #else | ||
155 | __put_le64_noalign(p, val); | ||
156 | #endif | ||
157 | } | ||
158 | |||
159 | static inline void put_unaligned_be16(u16 val, void *p) | ||
160 | { | ||
161 | #ifdef __BIG_ENDIAN | ||
162 | ((struct __una_u16 *)p)->x = val; | ||
163 | #else | ||
164 | __put_be16_noalign(p, val); | ||
165 | #endif | ||
166 | } | ||
167 | |||
168 | static inline void put_unaligned_be32(u32 val, void *p) | ||
169 | { | ||
170 | #ifdef __BIG_ENDIAN | ||
171 | ((struct __una_u32 *)p)->x = val; | ||
172 | #else | ||
173 | __put_be32_noalign(p, val); | ||
174 | #endif | ||
175 | } | ||
176 | |||
177 | static inline void put_unaligned_be64(u64 val, void *p) | ||
178 | { | ||
179 | #ifdef __BIG_ENDIAN | ||
180 | ((struct __una_u64 *)p)->x = val; | ||
181 | #else | ||
182 | __put_be64_noalign(p, val); | ||
183 | #endif | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Cause a link-time error if we try an unaligned access other than | ||
188 | * 1,2,4 or 8 bytes long | ||
189 | */ | ||
190 | extern void __bad_unaligned_access_size(void); | ||
191 | |||
192 | #define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ | ||
193 | __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ | ||
194 | __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ | ||
195 | __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ | ||
196 | __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ | ||
197 | __bad_unaligned_access_size())))); \ | ||
198 | })) | ||
199 | |||
200 | #define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ | ||
201 | __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ | ||
202 | __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ | ||
203 | __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ | ||
204 | __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ | ||
205 | __bad_unaligned_access_size())))); \ | ||
206 | })) | ||
207 | |||
208 | #define __put_unaligned_le(val, ptr) ({ \ | ||
209 | void *__gu_p = (ptr); \ | ||
210 | switch (sizeof(*(ptr))) { \ | ||
211 | case 1: \ | ||
212 | *(u8 *)__gu_p = (__force u8)(val); \ | ||
213 | break; \ | ||
214 | case 2: \ | ||
215 | put_unaligned_le16((__force u16)(val), __gu_p); \ | ||
216 | break; \ | ||
217 | case 4: \ | ||
218 | put_unaligned_le32((__force u32)(val), __gu_p); \ | ||
219 | break; \ | ||
220 | case 8: \ | ||
221 | put_unaligned_le64((__force u64)(val), __gu_p); \ | ||
222 | break; \ | ||
223 | default: \ | ||
224 | __bad_unaligned_access_size(); \ | ||
225 | break; \ | ||
226 | } \ | ||
227 | (void)0; }) | ||
228 | |||
229 | #define __put_unaligned_be(val, ptr) ({ \ | ||
230 | void *__gu_p = (ptr); \ | ||
231 | switch (sizeof(*(ptr))) { \ | ||
232 | case 1: \ | ||
233 | *(u8 *)__gu_p = (__force u8)(val); \ | ||
234 | break; \ | ||
235 | case 2: \ | ||
236 | put_unaligned_be16((__force u16)(val), __gu_p); \ | ||
237 | break; \ | ||
238 | case 4: \ | ||
239 | put_unaligned_be32((__force u32)(val), __gu_p); \ | ||
240 | break; \ | ||
241 | case 8: \ | ||
242 | put_unaligned_be64((__force u64)(val), __gu_p); \ | ||
243 | break; \ | ||
244 | default: \ | ||
245 | __bad_unaligned_access_size(); \ | ||
246 | break; \ | ||
247 | } \ | ||
248 | (void)0; }) | ||
249 | |||
250 | #ifdef __LITTLE_ENDIAN | ||
251 | # define get_unaligned __get_unaligned_le | ||
252 | # define put_unaligned __put_unaligned_le | ||
253 | #else | ||
254 | # define get_unaligned __get_unaligned_be | ||
255 | # define put_unaligned __put_unaligned_be | ||
256 | #endif | ||
257 | |||
258 | #endif /* __ASM_SH_UNALIGNED_SH4A_H */ | ||
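As the comments above note, movua.l gives SH-4A a 32-bit unaligned load only, so the 16- and 64-bit getters are composed from 32-bit reads while stores fall back to byte-wise or packed-struct code. The portable mock-up below reproduces that composition for the little-endian case, with memcpy() standing in for movua.l; like the real 16-bit helper, it reads a full 32-bit quantity and masks off the low half.

/* Portable mock-up of the composition above; illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned32(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* stand-in for a movua.l load */
	return v;
}

static uint16_t get_unaligned16_le(const uint8_t *p)
{
	/* Reads 32 bits and keeps the low half, as the real helper does. */
	return get_unaligned32(p) & 0xffff;
}

static uint64_t get_unaligned64_le(const uint8_t *p)
{
	return (uint64_t)get_unaligned32(p + 4) << 32 | get_unaligned32(p);
}

int main(void)
{
	uint8_t buf[16] = { 0x00, 0x11, 0x22, 0x33, 0x44,
			    0x55, 0x66, 0x77, 0x88, 0x99 };
	const uint8_t *p = buf + 1;	/* deliberately misaligned */

	printf("u16 = 0x%04x\n", get_unaligned16_le(p));
	printf("u64 = 0x%016llx\n", (unsigned long long)get_unaligned64_le(p));
	return 0;
}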
diff --git a/arch/sh/include/asm/unaligned.h b/arch/sh/include/asm/unaligned.h index c1641a01d50..8c0ad5e4487 100644 --- a/arch/sh/include/asm/unaligned.h +++ b/arch/sh/include/asm/unaligned.h | |||
@@ -1,7 +1,11 @@ | |||
1 | #ifndef _ASM_SH_UNALIGNED_H | 1 | #ifndef _ASM_SH_UNALIGNED_H |
2 | #define _ASM_SH_UNALIGNED_H | 2 | #define _ASM_SH_UNALIGNED_H |
3 | 3 | ||
4 | /* SH can't handle unaligned accesses. */ | 4 | #ifdef CONFIG_CPU_SH4A |
5 | /* SH-4A can handle unaligned loads in a relatively neutered fashion. */ | ||
6 | #include <asm/unaligned-sh4a.h> | ||
7 | #else | ||
8 | /* Otherwise, SH can't handle unaligned accesses. */ | ||
5 | #ifdef __LITTLE_ENDIAN__ | 9 | #ifdef __LITTLE_ENDIAN__ |
6 | # include <linux/unaligned/le_struct.h> | 10 | # include <linux/unaligned/le_struct.h> |
7 | # include <linux/unaligned/be_byteshift.h> | 11 | # include <linux/unaligned/be_byteshift.h> |
@@ -15,5 +19,6 @@ | |||
15 | # define get_unaligned __get_unaligned_be | 19 | # define get_unaligned __get_unaligned_be |
16 | # define put_unaligned __put_unaligned_be | 20 | # define put_unaligned __put_unaligned_be |
17 | #endif | 21 | #endif |
22 | #endif | ||
18 | 23 | ||
19 | #endif /* _ASM_SH_UNALIGNED_H */ | 24 | #endif /* _ASM_SH_UNALIGNED_H */ |
diff --git a/arch/sh/include/cpu-sh3/cpu/gpio.h b/arch/sh/include/cpu-sh3/cpu/gpio.h index 4e53eb314b8..9a22b882f3d 100644 --- a/arch/sh/include/cpu-sh3/cpu/gpio.h +++ b/arch/sh/include/cpu-sh3/cpu/gpio.h | |||
@@ -62,6 +62,20 @@ | |||
62 | #define PORT_PSELC 0xA4050128UL | 62 | #define PORT_PSELC 0xA4050128UL |
63 | #define PORT_PSELD 0xA405012AUL | 63 | #define PORT_PSELD 0xA405012AUL |
64 | 64 | ||
65 | #elif defined(CONFIG_CPU_SUBTYPE_SH7709) | ||
66 | |||
67 | /* Control registers */ | ||
68 | #define PORT_PACR 0xa4000100UL | ||
69 | #define PORT_PBCR 0xa4000102UL | ||
70 | #define PORT_PCCR 0xa4000104UL | ||
71 | #define PORT_PFCR 0xa400010aUL | ||
72 | |||
73 | /* Data registers */ | ||
74 | #define PORT_PADR 0xa4000120UL | ||
75 | #define PORT_PBDR 0xa4000122UL | ||
76 | #define PORT_PCDR 0xa4000124UL | ||
77 | #define PORT_PFDR 0xa400012aUL | ||
78 | |||
65 | #endif | 79 | #endif |
66 | 80 | ||
67 | #endif | 81 | #endif |
diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h index 5bdc9d9be3d..efc43b32346 100644 --- a/arch/sh/include/mach-common/mach/edosk7705.h +++ b/arch/sh/include/mach-common/mach/edosk7705.h | |||
@@ -1,30 +1,7 @@ | |||
1 | /* | 1 | #ifndef __ASM_SH_EDOSK7705_H |
2 | * include/asm-sh/edosk7705.h | 2 | #define __ASM_SH_EDOSK7705_H |
3 | * | ||
4 | * Modified version of io_se.h for the EDOSK7705 specific functions. | ||
5 | * | ||
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | * IO functions for an Hitachi EDOSK7705 development board | ||
10 | */ | ||
11 | |||
12 | #ifndef __ASM_SH_EDOSK7705_IO_H | ||
13 | #define __ASM_SH_EDOSK7705_IO_H | ||
14 | 3 | ||
4 | #define __IO_PREFIX sh_edosk7705 | ||
15 | #include <asm/io_generic.h> | 5 | #include <asm/io_generic.h> |
16 | 6 | ||
17 | extern unsigned char sh_edosk7705_inb(unsigned long port); | 7 | #endif /* __ASM_SH_EDOSK7705_H */ |
18 | extern unsigned int sh_edosk7705_inl(unsigned long port); | ||
19 | |||
20 | extern void sh_edosk7705_outb(unsigned char value, unsigned long port); | ||
21 | extern void sh_edosk7705_outl(unsigned int value, unsigned long port); | ||
22 | |||
23 | extern void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count); | ||
24 | extern void sh_edosk7705_insl(unsigned long port, void *addr, unsigned long count); | ||
25 | extern void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count); | ||
26 | extern void sh_edosk7705_outsl(unsigned long port, const void *addr, unsigned long count); | ||
27 | |||
28 | extern unsigned long sh_edosk7705_isa_port2addr(unsigned long offset); | ||
29 | |||
30 | #endif /* __ASM_SH_EDOSK7705_IO_H */ | ||
diff --git a/arch/sh/include/mach-se/mach/mrshpc.h b/arch/sh/include/mach-se/mach/mrshpc.h new file mode 100644 index 00000000000..56287ee8563 --- /dev/null +++ b/arch/sh/include/mach-se/mach/mrshpc.h | |||
@@ -0,0 +1,52 @@ | |||
1 | #ifndef __MACH_SE_MRSHPC_H | ||
2 | #define __MACH_SE_MRSHPC_H | ||
3 | |||
4 | #include <linux/io.h> | ||
5 | |||
6 | static inline void __init mrshpc_setup_windows(void) | ||
7 | { | ||
8 | if ((__raw_readw(MRSHPC_CSR) & 0x000c) != 0) | ||
9 | return; /* Not detected */ | ||
10 | |||
11 | if ((__raw_readw(MRSHPC_CSR) & 0x0080) == 0) { | ||
12 | __raw_writew(0x0674, MRSHPC_CPWCR); /* Card Vcc is 3.3v? */ | ||
13 | } else { | ||
14 | __raw_writew(0x0678, MRSHPC_CPWCR); /* Card Vcc is 5V */ | ||
15 | } | ||
16 | |||
17 | /* | ||
18 | * PC-Card window open | ||
19 | * flag == COMMON/ATTRIBUTE/IO | ||
20 | */ | ||
21 | /* common window open */ | ||
22 | __raw_writew(0x8a84, MRSHPC_MW0CR1); | ||
23 | if((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) | ||
24 | /* common mode & bus width 16bit SWAP = 1*/ | ||
25 | __raw_writew(0x0b00, MRSHPC_MW0CR2); | ||
26 | else | ||
27 | /* common mode & bus width 16bit SWAP = 0*/ | ||
28 | __raw_writew(0x0300, MRSHPC_MW0CR2); | ||
29 | |||
30 | /* attribute window open */ | ||
31 | __raw_writew(0x8a85, MRSHPC_MW1CR1); | ||
32 | if ((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) | ||
33 | /* attribute mode & bus width 16bit SWAP = 1*/ | ||
34 | __raw_writew(0x0a00, MRSHPC_MW1CR2); | ||
35 | else | ||
36 | /* attribute mode & bus width 16bit SWAP = 0*/ | ||
37 | __raw_writew(0x0200, MRSHPC_MW1CR2); | ||
38 | |||
39 | /* I/O window open */ | ||
40 | __raw_writew(0x8a86, MRSHPC_IOWCR1); | ||
41 | __raw_writew(0x0008, MRSHPC_CDCR); /* I/O card mode */ | ||
42 | if ((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) | ||
43 | __raw_writew(0x0a00, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 1*/ | ||
44 | else | ||
45 | __raw_writew(0x0200, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 0*/ | ||
46 | |||
47 | __raw_writew(0x2000, MRSHPC_ICR); | ||
48 | __raw_writeb(0x00, PA_MRSHPC_MW2 + 0x206); | ||
49 | __raw_writeb(0x42, PA_MRSHPC_MW2 + 0x200); | ||
50 | } | ||
51 | |||
52 | #endif /* __MACH_SE_MRSHPC_H */ | ||
diff --git a/arch/sh/include/mach-se/mach/se.h b/arch/sh/include/mach-se/mach/se.h index eb23000e1bb..14be91c5a2f 100644 --- a/arch/sh/include/mach-se/mach/se.h +++ b/arch/sh/include/mach-se/mach/se.h | |||
@@ -68,6 +68,24 @@ | |||
68 | #define BCR_ILCRF (PA_BCR + 10) | 68 | #define BCR_ILCRF (PA_BCR + 10) |
69 | #define BCR_ILCRG (PA_BCR + 12) | 69 | #define BCR_ILCRG (PA_BCR + 12) |
70 | 70 | ||
71 | #if defined(CONFIG_CPU_SUBTYPE_SH7709) | ||
72 | #define INTC_IRR0 0xa4000004UL | ||
73 | #define INTC_IRR1 0xa4000006UL | ||
74 | #define INTC_IRR2 0xa4000008UL | ||
75 | |||
76 | #define INTC_ICR0 0xfffffee0UL | ||
77 | #define INTC_ICR1 0xa4000010UL | ||
78 | #define INTC_ICR2 0xa4000012UL | ||
79 | #define INTC_INTER 0xa4000014UL | ||
80 | |||
81 | #define INTC_IPRC 0xa4000016UL | ||
82 | #define INTC_IPRD 0xa4000018UL | ||
83 | #define INTC_IPRE 0xa400001aUL | ||
84 | |||
85 | #define IRQ0_IRQ 32 | ||
86 | #define IRQ1_IRQ 33 | ||
87 | #endif | ||
88 | |||
71 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) | 89 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) |
72 | #define IRQ_STNIC 12 | 90 | #define IRQ_STNIC 12 |
73 | #define IRQ_CFCARD 14 | 91 | #define IRQ_CFCARD 14 |
diff --git a/arch/sh/include/mach-se/mach/se7343.h b/arch/sh/include/mach-se/mach/se7343.h index 98458460e63..749914b400f 100644 --- a/arch/sh/include/mach-se/mach/se7343.h +++ b/arch/sh/include/mach-se/mach/se7343.h | |||
@@ -118,9 +118,6 @@ | |||
118 | #define FPGA_IN 0xb1400000 | 118 | #define FPGA_IN 0xb1400000 |
119 | #define FPGA_OUT 0xb1400002 | 119 | #define FPGA_OUT 0xb1400002 |
120 | 120 | ||
121 | #define __IO_PREFIX sh7343se | ||
122 | #include <asm/io_generic.h> | ||
123 | |||
124 | #define IRQ0_IRQ 32 | 121 | #define IRQ0_IRQ 32 |
125 | #define IRQ1_IRQ 33 | 122 | #define IRQ1_IRQ 33 |
126 | #define IRQ4_IRQ 36 | 123 | #define IRQ4_IRQ 36 |
@@ -132,8 +129,10 @@ | |||
132 | #define SE7343_FPGA_IRQ_MRSHPC3 3 | 129 | #define SE7343_FPGA_IRQ_MRSHPC3 3 |
133 | #define SE7343_FPGA_IRQ_SMC 6 /* EXT_IRQ2 */ | 130 | #define SE7343_FPGA_IRQ_SMC 6 /* EXT_IRQ2 */ |
134 | #define SE7343_FPGA_IRQ_USB 8 | 131 | #define SE7343_FPGA_IRQ_USB 8 |
132 | #define SE7343_FPGA_IRQ_UARTA 10 | ||
133 | #define SE7343_FPGA_IRQ_UARTB 11 | ||
135 | 134 | ||
136 | #define SE7343_FPGA_IRQ_NR 11 | 135 | #define SE7343_FPGA_IRQ_NR 12 |
137 | #define SE7343_FPGA_IRQ_BASE 120 | 136 | #define SE7343_FPGA_IRQ_BASE 120 |
138 | 137 | ||
139 | #define MRSHPC_IRQ3 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC3) | 138 | #define MRSHPC_IRQ3 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC3) |
@@ -142,6 +141,8 @@ | |||
142 | #define MRSHPC_IRQ0 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC0) | 141 | #define MRSHPC_IRQ0 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC0) |
143 | #define SMC_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_SMC) | 142 | #define SMC_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_SMC) |
144 | #define USB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_USB) | 143 | #define USB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_USB) |
144 | #define UARTA_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTA) | ||
145 | #define UARTB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTB) | ||
145 | 146 | ||
146 | /* arch/sh/boards/se/7343/irq.c */ | 147 | /* arch/sh/boards/se/7343/irq.c */ |
147 | void init_7343se_IRQ(void); | 148 | void init_7343se_IRQ(void); |