Diffstat (limited to 'arch/sh/include/asm')
-rw-r--r--  arch/sh/include/asm/.gitignore | 1
-rw-r--r--  arch/sh/include/asm/Kbuild | 4
-rw-r--r--  arch/sh/include/asm/addrspace.h | 15
-rw-r--r--  arch/sh/include/asm/alignment.h | 21
-rw-r--r--  arch/sh/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/sh/include/asm/atomic-grb.h | 46
-rw-r--r--  arch/sh/include/asm/atomic-llsc.h | 27
-rw-r--r--  arch/sh/include/asm/atomic.h | 82
-rw-r--r--  arch/sh/include/asm/bitops.h | 4
-rw-r--r--  arch/sh/include/asm/bugs.h | 4
-rw-r--r--  arch/sh/include/asm/cacheflush.h | 13
-rw-r--r--  arch/sh/include/asm/clock.h | 11
-rw-r--r--  arch/sh/include/asm/cmpxchg-grb.h | 7
-rw-r--r--  arch/sh/include/asm/dma-mapping.h | 233
-rw-r--r--  arch/sh/include/asm/dma-register.h | 51
-rw-r--r--  arch/sh/include/asm/dma-sh.h | 63
-rw-r--r--  arch/sh/include/asm/dma.h | 6
-rw-r--r--  arch/sh/include/asm/dmaengine.h | 93
-rw-r--r--  arch/sh/include/asm/dwarf.h | 27
-rw-r--r--  arch/sh/include/asm/elf.h | 7
-rw-r--r--  arch/sh/include/asm/fixmap.h | 27
-rw-r--r--  arch/sh/include/asm/fpu.h | 55
-rw-r--r--  arch/sh/include/asm/ftrace.h | 17
-rw-r--r--  arch/sh/include/asm/gpio.h | 82
-rw-r--r--  arch/sh/include/asm/hardirq.h | 13
-rw-r--r--  arch/sh/include/asm/hw_breakpoint.h | 67
-rw-r--r--  arch/sh/include/asm/io.h | 181
-rw-r--r--  arch/sh/include/asm/irqflags.h | 31
-rw-r--r--  arch/sh/include/asm/irqflags_32.h | 99
-rw-r--r--  arch/sh/include/asm/irqflags_64.h | 85
-rw-r--r--  arch/sh/include/asm/kdebug.h | 2
-rw-r--r--  arch/sh/include/asm/machvec.h | 2
-rw-r--r--  arch/sh/include/asm/mmu.h | 82
-rw-r--r--  arch/sh/include/asm/mmu_context.h | 6
-rw-r--r--  arch/sh/include/asm/mmu_context_32.h | 4
-rw-r--r--  arch/sh/include/asm/module.h | 17
-rw-r--r--  arch/sh/include/asm/page.h | 19
-rw-r--r--  arch/sh/include/asm/pci.h | 94
-rw-r--r--  arch/sh/include/asm/perf_event.h | 31
-rw-r--r--  arch/sh/include/asm/pgalloc.h | 32
-rw-r--r--  arch/sh/include/asm/pgtable-2level.h | 23
-rw-r--r--  arch/sh/include/asm/pgtable-3level.h | 56
-rw-r--r--  arch/sh/include/asm/pgtable.h | 55
-rw-r--r--  arch/sh/include/asm/pgtable_32.h | 11
-rw-r--r--  arch/sh/include/asm/pgtable_64.h | 26
-rw-r--r--  arch/sh/include/asm/processor.h | 21
-rw-r--r--  arch/sh/include/asm/processor_32.h | 38
-rw-r--r--  arch/sh/include/asm/processor_64.h | 23
-rw-r--r--  arch/sh/include/asm/ptrace.h | 22
-rw-r--r--  arch/sh/include/asm/reboot.h | 21
-rw-r--r--  arch/sh/include/asm/scatterlist.h | 2
-rw-r--r--  arch/sh/include/asm/setup.h | 1
-rw-r--r--  arch/sh/include/asm/sh_bios.h | 15
-rw-r--r--  arch/sh/include/asm/sh_eth.h | 1
-rw-r--r--  arch/sh/include/asm/sh_keysc.h | 14
-rw-r--r--  arch/sh/include/asm/siu.h | 26
-rw-r--r--  arch/sh/include/asm/spinlock.h | 58
-rw-r--r--  arch/sh/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/sh/include/asm/suspend.h | 66
-rw-r--r--  arch/sh/include/asm/syscall.h | 2
-rw-r--r--  arch/sh/include/asm/syscalls.h | 5
-rw-r--r--  arch/sh/include/asm/system.h | 13
-rw-r--r--  arch/sh/include/asm/system_32.h | 44
-rw-r--r--  arch/sh/include/asm/system_64.h | 36
-rw-r--r--  arch/sh/include/asm/thread_info.h | 40
-rw-r--r--  arch/sh/include/asm/timex.h | 10
-rw-r--r--  arch/sh/include/asm/tlb.h | 17
-rw-r--r--  arch/sh/include/asm/topology.h | 10
-rw-r--r--  arch/sh/include/asm/ubc.h | 75
-rw-r--r--  arch/sh/include/asm/uncached.h | 18
-rw-r--r--  arch/sh/include/asm/unistd_32.h | 4
-rw-r--r--  arch/sh/include/asm/unistd_64.h | 6
-rw-r--r--  arch/sh/include/asm/vmlinux.lds.h | 8
-rw-r--r--  arch/sh/include/asm/watchdog.h | 67
74 files changed, 1400 insertions, 1104 deletions
diff --git a/arch/sh/include/asm/.gitignore b/arch/sh/include/asm/.gitignore
deleted file mode 100644
index 378db779fb6c..000000000000
--- a/arch/sh/include/asm/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-machtypes.h
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index e121c30f797d..46cb93477bcb 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,6 +1,8 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += cachectl.h cpu-features.h
+header-y += cachectl.h
+header-y += cpu-features.h
+header-y += hw_breakpoint.h
 
 unifdef-y += unistd_32.h
 unifdef-y += unistd_64.h
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 80d40813e057..446b3831c214 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -28,10 +28,7 @@
 /* Returns the privileged segment base of a given address */
 #define PXSEG(a)	(((unsigned long)(a)) & 0xe0000000)
 
-/* Returns the physical address of a PnSEG (n=1,2) address */
-#define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
-
-#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_29BIT
 /*
  * Map an address to a certain privileged segment
  */
@@ -43,7 +40,15 @@
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
 #define P4SEGADDR(a)	\
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT || PMB_FIXED */
+#else
+/*
+ * These will never work in 32-bit, don't even bother.
+ */
+#define P1SEGADDR(a)	__futile_remapping_attempt
+#define P2SEGADDR(a)	__futile_remapping_attempt
+#define P3SEGADDR(a)	__futile_remapping_attempt
+#define P4SEGADDR(a)	__futile_remapping_attempt
+#endif
 #endif /* P1SEG */
 
 /* Check if an address can be reached in 29 bits */
diff --git a/arch/sh/include/asm/alignment.h b/arch/sh/include/asm/alignment.h
new file mode 100644
index 000000000000..b12efecf5294
--- /dev/null
+++ b/arch/sh/include/asm/alignment.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SH_ALIGNMENT_H
+#define __ASM_SH_ALIGNMENT_H
+
+#include <linux/types.h>
+
+extern void inc_unaligned_byte_access(void);
+extern void inc_unaligned_word_access(void);
+extern void inc_unaligned_dword_access(void);
+extern void inc_unaligned_multi_access(void);
+extern void inc_unaligned_user_access(void);
+extern void inc_unaligned_kernel_access(void);
+
+#define UM_WARN		(1 << 0)
+#define UM_FIXUP	(1 << 1)
+#define UM_SIGNAL	(1 << 2)
+
+extern unsigned int unaligned_user_action(void);
+
+extern void unaligned_fixups_notify(struct task_struct *, insn_size_t, struct pt_regs *);
+
+#endif /* __ASM_SH_ALIGNMENT_H */
diff --git a/arch/sh/include/asm/asm-offsets.h b/arch/sh/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/sh/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 4c5b7dbfcedb..a273c88578fc 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -120,50 +120,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
120 : "memory" , "r0", "r1"); 120 : "memory" , "r0", "r1");
121} 121}
122 122
123static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
124{
125 int ret;
126
127 __asm__ __volatile__ (
128 " .align 2 \n\t"
129 " mova 1f, r0 \n\t"
130 " nop \n\t"
131 " mov r15, r1 \n\t"
132 " mov #-8, r15 \n\t"
133 " mov.l @%1, %0 \n\t"
134 " cmp/eq %2, %0 \n\t"
135 " bf 1f \n\t"
136 " mov.l %3, @%1 \n\t"
137 "1: mov r1, r15 \n\t"
138 : "=&r" (ret)
139 : "r" (v), "r" (old), "r" (new)
140 : "memory" , "r0", "r1" , "t");
141
142 return ret;
143}
144
145static inline int atomic_add_unless(atomic_t *v, int a, int u)
146{
147 int ret;
148 unsigned long tmp;
149
150 __asm__ __volatile__ (
151 " .align 2 \n\t"
152 " mova 1f, r0 \n\t"
153 " nop \n\t"
154 " mov r15, r1 \n\t"
155 " mov #-12, r15 \n\t"
156 " mov.l @%2, %1 \n\t"
157 " mov %1, %0 \n\t"
158 " cmp/eq %4, %0 \n\t"
159 " bt/s 1f \n\t"
160 " add %3, %1 \n\t"
161 " mov.l %1, @%2 \n\t"
162 "1: mov r1, r15 \n\t"
163 : "=&r" (ret), "=&r" (tmp)
164 : "r" (v), "r" (a), "r" (u)
165 : "memory" , "r0", "r1" , "t");
166
167 return ret != u;
168}
169#endif /* __ASM_SH_ATOMIC_GRB_H */ 123#endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index b040e1e08610..4b00b78e3f4f 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -104,31 +104,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
104 : "t"); 104 : "t");
105} 105}
106 106
107#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
108
109/**
110 * atomic_add_unless - add unless the number is a given value
111 * @v: pointer of type atomic_t
112 * @a: the amount to add to v...
113 * @u: ...unless v is equal to u.
114 *
115 * Atomically adds @a to @v, so long as it was not @u.
116 * Returns non-zero if @v was not @u, and zero otherwise.
117 */
118static inline int atomic_add_unless(atomic_t *v, int a, int u)
119{
120 int c, old;
121 c = atomic_read(v);
122 for (;;) {
123 if (unlikely(c == (u)))
124 break;
125 old = atomic_cmpxchg((v), c, c + (a));
126 if (likely(old == c))
127 break;
128 c = old;
129 }
130
131 return c != (u);
132}
133
134#endif /* __ASM_SH_ATOMIC_LLSC_H */ 107#endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index e8e78137c6f5..275a448ae8c2 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -25,64 +25,48 @@
 #endif
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
+#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
 
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic_inc(v)			atomic_add(1, (v))
+#define atomic_dec(v)			atomic_sub(1, (v))
 
-/*
- * atomic_inc_and_test - increment and test
+#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n)		(cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
  *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic_dec(v) atomic_sub(1,(v))
-
-#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int ret;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	ret = v->counter;
-	if (likely(ret == old))
-		v->counter = new;
-	local_irq_restore(flags);
-
-	return ret;
-}
-
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int ret;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	ret = v->counter;
-	if (ret != u)
-		v->counter += a;
-	local_irq_restore(flags);
-
-	return ret != u;
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return c != (u);
 }
-#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-/* Atomic operations are already serializing on SH */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
 
 #include <asm-generic/atomic-long.h>
 #include <asm-generic/atomic64.h>
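With the change above, atomic_add_unless() on SH is always the generic cmpxchg() loop rather than the old IRQ-disabling fallback. Callers are unchanged; a minimal illustrative use (hypothetical helper, not part of this patch) is the classic "take a reference unless the count already hit zero" pattern:

	/* Returns non-zero if the count was not 0 and was incremented. */
	static inline int get_ref_if_live(atomic_t *refcnt)
	{
		return atomic_add_unless(refcnt, 1, 0);	/* same as atomic_inc_not_zero() */
	}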
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index ebe595b7ab1f..98511e4d28cb 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -26,8 +26,8 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
 
 #ifdef CONFIG_SUPERH32
 static inline unsigned long ffz(unsigned long word)
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
index 46260fcbdf4b..02a19a1c033a 100644
--- a/arch/sh/include/asm/bugs.h
+++ b/arch/sh/include/asm/bugs.h
@@ -14,11 +14,15 @@
 
 #include <asm/processor.h>
 
+extern void select_idle_routine(void);
+
 static void __init check_bugs(void)
 {
 	extern unsigned long loops_per_jiffy;
 	char *p = &init_utsname()->machine[2]; /* "sh" */
 
+	select_idle_routine();
+
 	current_cpu_data.loops_per_jiffy = loops_per_jiffy;
 
 	switch (current_cpu_data.family) {
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index c29918f3c819..1f4e562c5e8c 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -42,6 +42,7 @@ extern void flush_cache_page(struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long pfn);
 extern void flush_cache_range(struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end);
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *page);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_page(struct vm_area_struct *vma,
@@ -62,6 +63,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
 		__flush_anon_page(page, vmaddr);
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	__flush_wback_region(addr, size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	__flush_invalidate_region(addr, size);
+}
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
@@ -77,8 +86,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len);
 
-#define flush_cache_vmap(start, end)		flush_cache_all()
-#define flush_cache_vunmap(start, end)		flush_cache_all()
+#define flush_cache_vmap(start, end)		local_flush_cache_all(NULL)
+#define flush_cache_vunmap(start, end)		local_flush_cache_all(NULL)
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h
index 9fe7d7f8af40..11da4c5beb68 100644
--- a/arch/sh/include/asm/clock.h
+++ b/arch/sh/include/asm/clock.h
@@ -146,8 +146,17 @@ int sh_clk_mstp32_register(struct clk *clks, int nr);
 	.flags = _flags,				\
 }
 
+struct clk_div4_table {
+	struct clk_div_mult_table *div_mult_table;
+	void (*kick)(struct clk *clk);
+};
+
 int sh_clk_div4_register(struct clk *clks, int nr,
-			 struct clk_div_mult_table *table);
+			 struct clk_div4_table *table);
+int sh_clk_div4_enable_register(struct clk *clks, int nr,
+			 struct clk_div4_table *table);
+int sh_clk_div4_reparent_register(struct clk *clks, int nr,
+			 struct clk_div4_table *table);
 
 #define SH_CLK_DIV6(_name, _parent, _reg, _flags) \
 { \
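sh_clk_div4_register() and the new enable/reparent variants now take a struct clk_div4_table, which bundles the existing clk_div_mult_table with an optional kick() callback run after the divider field is rewritten. A sketch of the kind of CPU clock code expected to use it (identifiers below are illustrative, not taken from this patch):

	static unsigned int div4_divisors[] = { 2, 3, 4, 6, 8, 12, 16, 24 };

	static struct clk_div_mult_table div4_div_mult_table = {
		.divisors	= div4_divisors,
		.nr_divisors	= ARRAY_SIZE(div4_divisors),
	};

	static void div4_kick(struct clk *clk)
	{
		/* e.g. poke the frequency-control "kick" bit so the new divider takes effect */
	}

	static struct clk_div4_table div4_table = {
		.div_mult_table	= &div4_div_mult_table,
		.kick		= div4_kick,
	};

	/* sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table); */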
diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h
index e2681abe764f..4676bf57693a 100644
--- a/arch/sh/include/asm/cmpxchg-grb.h
+++ b/arch/sh/include/asm/cmpxchg-grb.h
@@ -57,11 +57,10 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
57 " mov.l @%1, %0 \n\t" /* load old value */ 57 " mov.l @%1, %0 \n\t" /* load old value */
58 " cmp/eq %0, %2 \n\t" 58 " cmp/eq %0, %2 \n\t"
59 " bf 1f \n\t" /* if not equal */ 59 " bf 1f \n\t" /* if not equal */
60 " mov.l %2, @%1 \n\t" /* store new value */ 60 " mov.l %3, @%1 \n\t" /* store new value */
61 "1: mov r1, r15 \n\t" /* LOGOUT */ 61 "1: mov r1, r15 \n\t" /* LOGOUT */
62 : "=&r" (retval), 62 : "=&r" (retval)
63 "+r" (m) 63 : "r" (m), "r" (old), "r" (new)
64 : "r" (new)
65 : "memory" , "r0", "r1", "t"); 64 : "memory" , "r0", "r1", "t");
66 65
67 return retval; 66 return retval;
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 69d56dd4c968..bea3337a426a 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -1,219 +1,106 @@
1#ifndef __ASM_SH_DMA_MAPPING_H 1#ifndef __ASM_SH_DMA_MAPPING_H
2#define __ASM_SH_DMA_MAPPING_H 2#define __ASM_SH_DMA_MAPPING_H
3 3
4#include <linux/mm.h> 4extern struct dma_map_ops *dma_ops;
5#include <linux/scatterlist.h> 5extern void no_iommu_init(void);
6#include <linux/dma-debug.h> 6
7#include <asm/cacheflush.h> 7static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8#include <asm/io.h> 8{
9 return dma_ops;
10}
11
9#include <asm-generic/dma-coherent.h> 12#include <asm-generic/dma-coherent.h>
13#include <asm-generic/dma-mapping-common.h>
14
15static inline int dma_supported(struct device *dev, u64 mask)
16{
17 struct dma_map_ops *ops = get_dma_ops(dev);
10 18
11extern struct bus_type pci_bus_type; 19 if (ops->dma_supported)
20 return ops->dma_supported(dev, mask);
12 21
13#define dma_supported(dev, mask) (1) 22 return 1;
23}
14 24
15static inline int dma_set_mask(struct device *dev, u64 mask) 25static inline int dma_set_mask(struct device *dev, u64 mask)
16{ 26{
27 struct dma_map_ops *ops = get_dma_ops(dev);
28
17 if (!dev->dma_mask || !dma_supported(dev, mask)) 29 if (!dev->dma_mask || !dma_supported(dev, mask))
18 return -EIO; 30 return -EIO;
31 if (ops->set_dma_mask)
32 return ops->set_dma_mask(dev, mask);
19 33
20 *dev->dma_mask = mask; 34 *dev->dma_mask = mask;
21 35
22 return 0; 36 return 0;
23} 37}
24 38
25void *dma_alloc_coherent(struct device *dev, size_t size,
26 dma_addr_t *dma_handle, gfp_t flag);
27
28void dma_free_coherent(struct device *dev, size_t size,
29 void *vaddr, dma_addr_t dma_handle);
30
31void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 39void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
32 enum dma_data_direction dir); 40 enum dma_data_direction dir);
33 41
34#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 42#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
35#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 43#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
36#define dma_is_consistent(d, h) (1)
37
38static inline dma_addr_t dma_map_single(struct device *dev,
39 void *ptr, size_t size,
40 enum dma_data_direction dir)
41{
42 dma_addr_t addr = virt_to_phys(ptr);
43
44#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
45 if (dev->bus == &pci_bus_type)
46 return addr;
47#endif
48 dma_cache_sync(dev, ptr, size, dir);
49 44
50 debug_dma_map_page(dev, virt_to_page(ptr), 45#ifdef CONFIG_DMA_COHERENT
51 (unsigned long)ptr & ~PAGE_MASK, size, 46#define dma_is_consistent(d, h) (1)
52 dir, addr, true); 47#else
53 48#define dma_is_consistent(d, h) (0)
54 return addr;
55}
56
57static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
58 size_t size, enum dma_data_direction dir)
59{
60 debug_dma_unmap_page(dev, addr, size, dir, true);
61}
62
63static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
64 int nents, enum dma_data_direction dir)
65{
66 int i;
67
68 for (i = 0; i < nents; i++) {
69#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
70 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
71#endif
72 sg[i].dma_address = sg_phys(&sg[i]);
73 sg[i].dma_length = sg[i].length;
74 }
75
76 debug_dma_map_sg(dev, sg, nents, i, dir);
77
78 return nents;
79}
80
81static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
82 int nents, enum dma_data_direction dir)
83{
84 debug_dma_unmap_sg(dev, sg, nents, dir);
85}
86
87static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
88 unsigned long offset, size_t size,
89 enum dma_data_direction dir)
90{
91 return dma_map_single(dev, page_address(page) + offset, size, dir);
92}
93
94static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
95 size_t size, enum dma_data_direction dir)
96{
97 dma_unmap_single(dev, dma_address, size, dir);
98}
99
100static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
101 size_t size, enum dma_data_direction dir)
102{
103#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
104 if (dev->bus == &pci_bus_type)
105 return;
106#endif 49#endif
107 dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
108}
109 50
110static inline void dma_sync_single_range(struct device *dev, 51static inline int dma_get_cache_alignment(void)
111 dma_addr_t dma_handle,
112 unsigned long offset, size_t size,
113 enum dma_data_direction dir)
114{ 52{
115#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) 53 /*
116 if (dev->bus == &pci_bus_type) 54 * Each processor family will define its own L1_CACHE_SHIFT,
117 return; 55 * L1_CACHE_BYTES wraps to this, so this is always safe.
118#endif 56 */
119 dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir); 57 return L1_CACHE_BYTES;
120} 58}
121 59
122static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg, 60static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
123 int nelems, enum dma_data_direction dir)
124{ 61{
125 int i; 62 struct dma_map_ops *ops = get_dma_ops(dev);
126 63
127 for (i = 0; i < nelems; i++) { 64 if (ops->mapping_error)
128#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 65 return ops->mapping_error(dev, dma_addr);
129 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
130#endif
131 sg[i].dma_address = sg_phys(&sg[i]);
132 sg[i].dma_length = sg[i].length;
133 }
134}
135 66
136static inline void dma_sync_single_for_cpu(struct device *dev, 67 return dma_addr == 0;
137 dma_addr_t dma_handle, size_t size,
138 enum dma_data_direction dir)
139{
140 __dma_sync_single(dev, dma_handle, size, dir);
141 debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
142}
143
144static inline void dma_sync_single_for_device(struct device *dev,
145 dma_addr_t dma_handle,
146 size_t size,
147 enum dma_data_direction dir)
148{
149 __dma_sync_single(dev, dma_handle, size, dir);
150 debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
151} 68}
152 69
153static inline void dma_sync_single_range_for_cpu(struct device *dev, 70static inline void *dma_alloc_coherent(struct device *dev, size_t size,
154 dma_addr_t dma_handle, 71 dma_addr_t *dma_handle, gfp_t gfp)
155 unsigned long offset,
156 size_t size,
157 enum dma_data_direction direction)
158{ 72{
159 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); 73 struct dma_map_ops *ops = get_dma_ops(dev);
160 debug_dma_sync_single_range_for_cpu(dev, dma_handle, 74 void *memory;
161 offset, size, direction);
162}
163 75
164static inline void dma_sync_single_range_for_device(struct device *dev, 76 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
165 dma_addr_t dma_handle, 77 return memory;
166 unsigned long offset, 78 if (!ops->alloc_coherent)
167 size_t size, 79 return NULL;
168 enum dma_data_direction direction)
169{
170 dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
171 debug_dma_sync_single_range_for_device(dev, dma_handle,
172 offset, size, direction);
173}
174 80
81 memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
82 debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
175 83
176static inline void dma_sync_sg_for_cpu(struct device *dev, 84 return memory;
177 struct scatterlist *sg, int nelems,
178 enum dma_data_direction dir)
179{
180 __dma_sync_sg(dev, sg, nelems, dir);
181 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
182} 85}
183 86
184static inline void dma_sync_sg_for_device(struct device *dev, 87static inline void dma_free_coherent(struct device *dev, size_t size,
185 struct scatterlist *sg, int nelems, 88 void *vaddr, dma_addr_t dma_handle)
186 enum dma_data_direction dir)
187{ 89{
188 __dma_sync_sg(dev, sg, nelems, dir); 90 struct dma_map_ops *ops = get_dma_ops(dev);
189 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
190}
191 91
192static inline int dma_get_cache_alignment(void) 92 if (dma_release_from_coherent(dev, get_order(size), vaddr))
193{ 93 return;
194 /*
195 * Each processor family will define its own L1_CACHE_SHIFT,
196 * L1_CACHE_BYTES wraps to this, so this is always safe.
197 */
198 return L1_CACHE_BYTES;
199}
200 94
201static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 95 debug_dma_free_coherent(dev, size, vaddr, dma_handle);
202{ 96 if (ops->free_coherent)
203 return dma_addr == 0; 97 ops->free_coherent(dev, size, vaddr, dma_handle);
204} 98}
205 99
206#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY 100/* arch/sh/mm/consistent.c */
207 101extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
208extern int 102 dma_addr_t *dma_addr, gfp_t flag);
209dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 103extern void dma_generic_free_coherent(struct device *dev, size_t size,
210 dma_addr_t device_addr, size_t size, int flags); 104 void *vaddr, dma_addr_t dma_handle);
211
212extern void
213dma_release_declared_memory(struct device *dev);
214
215extern void *
216dma_mark_declared_memory_occupied(struct device *dev,
217 dma_addr_t device_addr, size_t size);
218 105
219#endif /* __ASM_SH_DMA_MAPPING_H */ 106#endif /* __ASM_SH_DMA_MAPPING_H */
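The rewritten dma-mapping.h is a thin wrapper around struct dma_map_ops: every operation is dispatched through get_dma_ops(), with the common map/unmap and sync helpers coming from <asm-generic/dma-mapping-common.h>. Driver-facing usage is unchanged; an illustrative coherent allocation still reads:

	dma_addr_t dma_handle;
	void *buf;

	buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* the device is programmed with dma_handle, the CPU touches buf */
	dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);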
diff --git a/arch/sh/include/asm/dma-register.h b/arch/sh/include/asm/dma-register.h
new file mode 100644
index 000000000000..51cd78feacff
--- /dev/null
+++ b/arch/sh/include/asm/dma-register.h
@@ -0,0 +1,51 @@
+/*
+ * Common header for the legacy SH DMA driver and the new dmaengine driver
+ *
+ * extracted from arch/sh/include/asm/dma-sh.h:
+ *
+ * Copyright (C) 2000  Takashi YOSHII
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef DMA_REGISTER_H
+#define DMA_REGISTER_H
+
+/* DMA register */
+#define SAR	0x00
+#define DAR	0x04
+#define TCR	0x08
+#define CHCR	0x0C
+#define DMAOR	0x40
+
+/* DMAOR definitions */
+#define DMAOR_AE	0x00000004
+#define DMAOR_NMIF	0x00000002
+#define DMAOR_DME	0x00000001
+
+/* Definitions for the SuperH DMAC */
+#define REQ_L	0x00000000
+#define REQ_E	0x00080000
+#define RACK_H	0x00000000
+#define RACK_L	0x00040000
+#define ACK_R	0x00000000
+#define ACK_W	0x00020000
+#define ACK_H	0x00000000
+#define ACK_L	0x00010000
+#define DM_INC	0x00004000
+#define DM_DEC	0x00008000
+#define DM_FIX	0x0000c000
+#define SM_INC	0x00001000
+#define SM_DEC	0x00002000
+#define SM_FIX	0x00003000
+#define RS_IN	0x00000200
+#define RS_OUT	0x00000300
+#define TS_BLK	0x00000040
+#define TM_BUR	0x00000020
+#define CHCR_DE	0x00000001
+#define CHCR_TE	0x00000002
+#define CHCR_IE	0x00000004
+
+#endif
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
index 78eed3e0bdf5..f3acb8e34c6b 100644
--- a/arch/sh/include/asm/dma-sh.h
+++ b/arch/sh/include/asm/dma-sh.h
@@ -11,7 +11,8 @@
 #ifndef __DMA_SH_H
 #define __DMA_SH_H
 
-#include <asm/dma.h>
+#include <asm/dma-register.h>
+#include <cpu/dma-register.h>
 #include <cpu/dma.h>
 
 /* DMAOR contorl: The DMAOR access size is different by CPU.*/
@@ -20,14 +21,14 @@
     defined(CONFIG_CPU_SUBTYPE_SH7780)  || \
     defined(CONFIG_CPU_SUBTYPE_SH7785)
 #define dmaor_read_reg(n) \
-    (n ? ctrl_inw(SH_DMAC_BASE1 + DMAOR) \
-	: ctrl_inw(SH_DMAC_BASE0 + DMAOR))
+    (n ? __raw_readw(SH_DMAC_BASE1 + DMAOR) \
+	: __raw_readw(SH_DMAC_BASE0 + DMAOR))
 #define dmaor_write_reg(n, data) \
-    (n ? ctrl_outw(data, SH_DMAC_BASE1 + DMAOR) \
-	: ctrl_outw(data, SH_DMAC_BASE0 + DMAOR))
+    (n ? __raw_writew(data, SH_DMAC_BASE1 + DMAOR) \
+	: __raw_writew(data, SH_DMAC_BASE0 + DMAOR))
 #else /* Other CPU */
-#define dmaor_read_reg(n) ctrl_inw(SH_DMAC_BASE0 + DMAOR)
-#define dmaor_write_reg(n, data) ctrl_outw(data, SH_DMAC_BASE0 + DMAOR)
+#define dmaor_read_reg(n) __raw_readw(SH_DMAC_BASE0 + DMAOR)
+#define dmaor_write_reg(n, data) __raw_writew(data, SH_DMAC_BASE0 + DMAOR)
 #endif
 
 static int dmte_irq_map[] __maybe_unused = {
@@ -53,37 +54,11 @@ static int dmte_irq_map[] __maybe_unused = {
 #endif
 };
 
-/* Definitions for the SuperH DMAC */
-#define REQ_L	0x00000000
-#define REQ_E	0x00080000
-#define RACK_H	0x00000000
-#define RACK_L	0x00040000
-#define ACK_R	0x00000000
-#define ACK_W	0x00020000
-#define ACK_H	0x00000000
-#define ACK_L	0x00010000
-#define DM_INC	0x00004000
-#define DM_DEC	0x00008000
-#define SM_INC	0x00001000
-#define SM_DEC	0x00002000
-#define RS_IN	0x00000200
-#define RS_OUT	0x00000300
-#define TS_BLK	0x00000040
-#define TM_BUR	0x00000020
-#define CHCR_DE	0x00000001
-#define CHCR_TE	0x00000002
-#define CHCR_IE	0x00000004
-
-/* DMAOR definitions */
-#define DMAOR_AE	0x00000004
-#define DMAOR_NMIF	0x00000002
-#define DMAOR_DME	0x00000001
-
 /*
  * Define the default configuration for dual address memory-memory transfer.
  * The 0x400 value represents auto-request, external->external.
  */
-#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_32)
+#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
 
 /* DMA base address */
 static u32 dma_base_addr[] __maybe_unused = {
@@ -109,24 +84,4 @@ static u32 dma_base_addr[] __maybe_unused = {
 #endif
 };
 
-/* DMA register */
-#define SAR	0x00
-#define DAR	0x04
-#define TCR	0x08
-#define CHCR	0x0C
-#define DMAOR	0x40
-
-/*
- * for dma engine
- *
- * SuperH DMA mode
- */
-#define SHDMA_MIX_IRQ	(1 << 1)
-#define SHDMA_DMAOR1	(1 << 2)
-#define SHDMA_DMAE1	(1 << 3)
-
-struct sh_dmae_pdata {
-	unsigned int mode;
-};
-
 #endif /* __DMA_SH_H */
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index 04ad0e1e637e..07373a074090 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -19,9 +19,11 @@
 #include <asm-generic/dma.h>
 
 #ifdef CONFIG_NR_DMA_CHANNELS
 #  define MAX_DMA_CHANNELS	(CONFIG_NR_DMA_CHANNELS)
+#elif defined(CONFIG_NR_ONCHIP_DMA_CHANNELS)
+#  define MAX_DMA_CHANNELS	(CONFIG_NR_ONCHIP_DMA_CHANNELS)
 #else
-#  define MAX_DMA_CHANNELS	(CONFIG_NR_ONCHIP_DMA_CHANNELS)
+#  define MAX_DMA_CHANNELS	0
 #endif
 
 /*
diff --git a/arch/sh/include/asm/dmaengine.h b/arch/sh/include/asm/dmaengine.h
new file mode 100644
index 000000000000..bf2f30cf0a27
--- /dev/null
+++ b/arch/sh/include/asm/dmaengine.h
@@ -0,0 +1,93 @@
+/*
+ * Header for the new SH dmaengine driver
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_DMAENGINE_H
+#define ASM_DMAENGINE_H
+
+#include <linux/dmaengine.h>
+#include <linux/list.h>
+
+#include <asm/dma-register.h>
+
+#define SH_DMAC_MAX_CHANNELS	6
+
+enum sh_dmae_slave_chan_id {
+	SHDMA_SLAVE_SCIF0_TX,
+	SHDMA_SLAVE_SCIF0_RX,
+	SHDMA_SLAVE_SCIF1_TX,
+	SHDMA_SLAVE_SCIF1_RX,
+	SHDMA_SLAVE_SCIF2_TX,
+	SHDMA_SLAVE_SCIF2_RX,
+	SHDMA_SLAVE_SCIF3_TX,
+	SHDMA_SLAVE_SCIF3_RX,
+	SHDMA_SLAVE_SCIF4_TX,
+	SHDMA_SLAVE_SCIF4_RX,
+	SHDMA_SLAVE_SCIF5_TX,
+	SHDMA_SLAVE_SCIF5_RX,
+	SHDMA_SLAVE_SIUA_TX,
+	SHDMA_SLAVE_SIUA_RX,
+	SHDMA_SLAVE_SIUB_TX,
+	SHDMA_SLAVE_SIUB_RX,
+	SHDMA_SLAVE_NUMBER,	/* Must stay last */
+};
+
+struct sh_dmae_slave_config {
+	enum sh_dmae_slave_chan_id	slave_id;
+	dma_addr_t			addr;
+	u32				chcr;
+	char				mid_rid;
+};
+
+struct sh_dmae_channel {
+	unsigned int	offset;
+	unsigned int	dmars;
+	unsigned int	dmars_bit;
+};
+
+struct sh_dmae_pdata {
+	struct sh_dmae_slave_config *slave;
+	int slave_num;
+	struct sh_dmae_channel *channel;
+	int channel_num;
+	unsigned int ts_low_shift;
+	unsigned int ts_low_mask;
+	unsigned int ts_high_shift;
+	unsigned int ts_high_mask;
+	unsigned int *ts_shift;
+	int ts_shift_num;
+	u16 dmaor_init;
+};
+
+struct device;
+
+/* Used by slave DMA clients to request DMA to/from a specific peripheral */
+struct sh_dmae_slave {
+	enum sh_dmae_slave_chan_id	slave_id; /* Set by the platform */
+	struct device			*dma_dev; /* Set by the platform */
+	struct sh_dmae_slave_config	*config;  /* Set by the driver */
+};
+
+struct sh_dmae_regs {
+	u32 sar; /* SAR / source address */
+	u32 dar; /* DAR / destination address */
+	u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_desc {
+	struct sh_dmae_regs hw;
+	struct list_head node;
+	struct dma_async_tx_descriptor async_tx;
+	enum dma_data_direction direction;
+	dma_cookie_t cookie;
+	size_t partial;
+	int chunks;
+	int mark;
+};
+
+#endif
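Boards and CPUs are expected to describe their DMAC to the new dmaengine driver through sh_dmae_pdata, with one sh_dmae_slave_config per peripheral request line keyed by the slave-channel IDs above. A rough, illustrative fragment (the FIFO address, CHCR bits and MID/RID below are made up, not taken from this patch):

	static struct sh_dmae_slave_config sh_dmae_slaves[] = {
		{
			.slave_id	= SHDMA_SLAVE_SCIF0_TX,
			.addr		= 0xffe0000c,		/* hypothetical SCIF0 TX FIFO */
			.chcr		= SM_INC | DM_FIX,	/* memory -> fixed FIFO */
			.mid_rid	= 0x25,			/* hypothetical MID/RID */
		},
	};

	static struct sh_dmae_pdata dma_platform_data = {
		.slave		= sh_dmae_slaves,
		.slave_num	= ARRAY_SIZE(sh_dmae_slaves),
		.channel	= sh_dmae_channels,		/* per-channel offsets/DMARS, not shown */
		.channel_num	= ARRAY_SIZE(sh_dmae_channels),
		.dmaor_init	= DMAOR_DME,
	};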
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h
index ced6795891a6..d62abd1d0c05 100644
--- a/arch/sh/include/asm/dwarf.h
+++ b/arch/sh/include/asm/dwarf.h
@@ -194,6 +194,12 @@
 #define DWARF_ARCH_RA_REG	17
 
 #ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/bug.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
 /*
  * Read either the frame pointer (r14) or the stack pointer (r15).
  * NOTE: this MUST be inlined.
@@ -237,10 +243,13 @@ struct dwarf_cie {
 
 	unsigned long cie_pointer;
 
-	struct list_head link;
-
 	unsigned long flags;
 #define DWARF_CIE_Z_AUGMENTATION	(1 << 0)
+
+	/* linked-list entry if this CIE is from a module */
+	struct list_head link;
+
+	struct rb_node node;
 };
 
 /**
@@ -254,7 +263,11 @@ struct dwarf_fde {
 	unsigned long address_range;
 	unsigned char *instructions;
 	unsigned char *end;
+
+	/* linked-list entry if this FDE is from a module */
 	struct list_head link;
+
+	struct rb_node node;
 };
 
 /**
@@ -364,6 +377,12 @@ static inline unsigned int DW_CFA_operand(unsigned long insn)
 
 extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
 					      struct dwarf_frame *);
+extern void dwarf_free_frame(struct dwarf_frame *);
+
+extern int module_dwarf_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+				 struct module *);
+extern void module_dwarf_cleanup(struct module *);
+
 #endif /* !__ASSEMBLY__ */
 
 #define CFI_STARTPROC	.cfi_startproc
@@ -391,6 +410,10 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
 static inline void dwarf_unwinder_init(void)
 {
 }
+
+#define module_dwarf_finalize(hdr, sechdrs, me)	(0)
+#define module_dwarf_cleanup(mod)		do { } while (0)
+
 #endif
 
 #endif /* CONFIG_DWARF_UNWINDER */
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ccb1d93bb043..ce830faeebbf 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
  */
 #define CORE_DUMP_USE_REGSET
 
-#define USE_ELF_CORE_DUMP
 #define ELF_FDPIC_CORE_EFLAGS	EF_SH_FDPIC
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
@@ -212,7 +211,9 @@ extern void __kernel_vsyscall;
 
 #define VSYSCALL_AUX_ENT					\
 	if (vdso_enabled)					\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);	\
+	else							\
+		NEW_AUX_ENT(AT_IGNORE, 0);
 #else
 #define VSYSCALL_AUX_ENT
 #endif /* CONFIG_VSYSCALL */
@@ -220,7 +221,7 @@ extern void __kernel_vsyscall;
 #ifdef CONFIG_SH_FPU
 #define FPU_AUX_ENT	NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
 #else
-#define FPU_AUX_ENT
+#define FPU_AUX_ENT	NEW_AUX_ENT(AT_IGNORE, 0)
 #endif
 
 extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index 721fcc4d5e98..6e7cea453895 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -14,9 +14,9 @@
 #define _ASM_FIXMAP_H
 
 #include <linux/kernel.h>
+#include <linux/threads.h>
 #include <asm/page.h>
 #ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
 #include <asm/kmap_types.h>
 #endif
 
@@ -46,19 +46,38 @@
  * fix-mapped?
  */
 enum fixed_addresses {
-#define FIX_N_COLOURS 16
+	/*
+	 * The FIX_CMAP entries are used by kmap_coherent() to get virtual
+	 * addresses which are of a known color, and so their values are
+	 * important. __fix_to_virt(FIX_CMAP_END - n) must give an address
+	 * which is the same color as a page (n<<PAGE_SHIFT).
+	 */
+#define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
-	FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
-	FIX_UNCACHED,
+	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
+
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
+
+#ifdef CONFIG_IOREMAP_FIXED
+	/*
+	 * FIX_IOREMAP entries are useful for mapping physical address
+	 * space before ioremap() is useable, e.g. really early in boot
+	 * before kmalloc() is working.
+	 */
+#define FIX_N_IOREMAPS	32
+	FIX_IOREMAP_BEGIN,
+	FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS,
+#endif
+
 	__end_of_fixed_addresses
 };
 
 extern void __set_fixmap(enum fixed_addresses idx,
 			 unsigned long phys, pgprot_t flags);
+extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
 
 #define set_fixmap(idx, phys) \
 	__set_fixmap(idx, phys, PAGE_KERNEL)
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 1d3aee04b5cc..06c4281aab65 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -2,8 +2,8 @@
2#define __ASM_SH_FPU_H 2#define __ASM_SH_FPU_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/preempt.h> 5
6#include <asm/ptrace.h> 6struct task_struct;
7 7
8#ifdef CONFIG_SH_FPU 8#ifdef CONFIG_SH_FPU
9static inline void release_fpu(struct pt_regs *regs) 9static inline void release_fpu(struct pt_regs *regs)
@@ -16,59 +16,56 @@ static inline void grab_fpu(struct pt_regs *regs)
16 regs->sr &= ~SR_FD; 16 regs->sr &= ~SR_FD;
17} 17}
18 18
19struct task_struct; 19extern void save_fpu(struct task_struct *__tsk);
20 20extern void restore_fpu(struct task_struct *__tsk);
21extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); 21extern void fpu_state_restore(struct pt_regs *regs);
22extern void __fpu_state_restore(void);
22#else 23#else
23 24#define save_fpu(tsk) do { } while (0)
24#define release_fpu(regs) do { } while (0) 25#define restore_fpu(tsk) do { } while (0)
25#define grab_fpu(regs) do { } while (0) 26#define release_fpu(regs) do { } while (0)
26 27#define grab_fpu(regs) do { } while (0)
27static inline void save_fpu(struct task_struct *tsk, struct pt_regs *regs) 28#define fpu_state_restore(regs) do { } while (0)
28{ 29#define __fpu_state_restore(regs) do { } while (0)
29 clear_tsk_thread_flag(tsk, TIF_USEDFPU);
30}
31#endif 30#endif
32 31
33struct user_regset; 32struct user_regset;
34 33
35extern int do_fpu_inst(unsigned short, struct pt_regs *); 34extern int do_fpu_inst(unsigned short, struct pt_regs *);
35extern int init_fpu(struct task_struct *);
36 36
37extern int fpregs_get(struct task_struct *target, 37extern int fpregs_get(struct task_struct *target,
38 const struct user_regset *regset, 38 const struct user_regset *regset,
39 unsigned int pos, unsigned int count, 39 unsigned int pos, unsigned int count,
40 void *kbuf, void __user *ubuf); 40 void *kbuf, void __user *ubuf);
41 41
42static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
43{
44 if (task_thread_info(tsk)->status & TS_USEDFPU) {
45 task_thread_info(tsk)->status &= ~TS_USEDFPU;
46 save_fpu(tsk);
47 release_fpu(regs);
48 } else
49 tsk->fpu_counter = 0;
50}
51
42static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) 52static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
43{ 53{
44 preempt_disable(); 54 preempt_disable();
45 if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) 55 __unlazy_fpu(tsk, regs);
46 save_fpu(tsk, regs);
47 preempt_enable(); 56 preempt_enable();
48} 57}
49 58
50static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs) 59static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
51{ 60{
52 preempt_disable(); 61 preempt_disable();
53 if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { 62 if (task_thread_info(tsk)->status & TS_USEDFPU) {
54 clear_tsk_thread_flag(tsk, TIF_USEDFPU); 63 task_thread_info(tsk)->status &= ~TS_USEDFPU;
55 release_fpu(regs); 64 release_fpu(regs);
56 } 65 }
57 preempt_enable(); 66 preempt_enable();
58} 67}
59 68
60static inline int init_fpu(struct task_struct *tsk)
61{
62 if (tsk_used_math(tsk)) {
63 if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
64 unlazy_fpu(tsk, task_pt_regs(tsk));
65 return 0;
66 }
67
68 set_stopped_child_used_math(tsk);
69 return 0;
70}
71
72#endif /* __ASSEMBLY__ */ 69#endif /* __ASSEMBLY__ */
73 70
74#endif /* __ASM_SH_FPU_H */ 71#endif /* __ASM_SH_FPU_H */
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index 12f3a31f20af..13e9966464c2 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -35,4 +35,21 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifndef __ASSEMBLY__
+
+/* arch/sh/kernel/return_address.c */
+extern void *return_address(unsigned int);
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 ((unsigned long)return_address(1))
+#define CALLER_ADDR2 ((unsigned long)return_address(2))
+#define CALLER_ADDR3 ((unsigned long)return_address(3))
+#define CALLER_ADDR4 ((unsigned long)return_address(4))
+#define CALLER_ADDR5 ((unsigned long)return_address(5))
+#define CALLER_ADDR6 ((unsigned long)return_address(6))
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_SH_FTRACE_H */
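CALLER_ADDR1..CALLER_ADDR6 previously expanded to __builtin_return_address(n), which SH cannot reliably resolve for n > 0; they now go through the new return_address() helper in arch/sh/kernel/return_address.c. Existing users such as the latency tracers keep working; an illustrative call site:

	/* print the caller of the current function */
	printk(KERN_DEBUG "called from %pS\n", (void *)CALLER_ADDR1);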
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h
index 61f93da2c62e..f8d9a731e903 100644
--- a/arch/sh/include/asm/gpio.h
+++ b/arch/sh/include/asm/gpio.h
@@ -20,7 +20,7 @@
 #endif
 
 #define ARCH_NR_GPIOS 512
-#include <asm-generic/gpio.h>
+#include <linux/sh_pfc.h>
 
 #ifdef CONFIG_GPIOLIB
 
@@ -53,84 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
 
 #endif /* CONFIG_GPIOLIB */
 
-typedef unsigned short pinmux_enum_t;
-typedef unsigned short pinmux_flag_t;
-
-#define PINMUX_TYPE_NONE		0
-#define PINMUX_TYPE_FUNCTION		1
-#define PINMUX_TYPE_GPIO		2
-#define PINMUX_TYPE_OUTPUT		3
-#define PINMUX_TYPE_INPUT		4
-#define PINMUX_TYPE_INPUT_PULLUP	5
-#define PINMUX_TYPE_INPUT_PULLDOWN	6
-
-#define PINMUX_FLAG_TYPE		(0x7)
-#define PINMUX_FLAG_WANT_PULLUP		(1 << 3)
-#define PINMUX_FLAG_WANT_PULLDOWN	(1 << 4)
-
-#define PINMUX_FLAG_DBIT_SHIFT		5
-#define PINMUX_FLAG_DBIT		(0x1f << PINMUX_FLAG_DBIT_SHIFT)
-#define PINMUX_FLAG_DREG_SHIFT		10
-#define PINMUX_FLAG_DREG		(0x3f << PINMUX_FLAG_DREG_SHIFT)
-
-struct pinmux_gpio {
-	pinmux_enum_t enum_id;
-	pinmux_flag_t flags;
-};
-
-#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark }
-#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
-
-struct pinmux_cfg_reg {
-	unsigned long reg, reg_width, field_width;
-	unsigned long *cnt;
-	pinmux_enum_t *enum_ids;
-};
-
-#define PINMUX_CFG_REG(name, r, r_width, f_width) \
-	.reg = r, .reg_width = r_width, .field_width = f_width, \
-	.cnt = (unsigned long [r_width / f_width]) {}, \
-	.enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \
-
-struct pinmux_data_reg {
-	unsigned long reg, reg_width, reg_shadow;
-	pinmux_enum_t *enum_ids;
-};
-
-#define PINMUX_DATA_REG(name, r, r_width) \
-	.reg = r, .reg_width = r_width, \
-	.enum_ids = (pinmux_enum_t [r_width]) \
-
-struct pinmux_range {
-	pinmux_enum_t begin;
-	pinmux_enum_t end;
-	pinmux_enum_t force;
-};
-
-struct pinmux_info {
-	char *name;
-	pinmux_enum_t reserved_id;
-	struct pinmux_range data;
-	struct pinmux_range input;
-	struct pinmux_range input_pd;
-	struct pinmux_range input_pu;
-	struct pinmux_range output;
-	struct pinmux_range mark;
-	struct pinmux_range function;
-
-	unsigned first_gpio, last_gpio;
-
-	struct pinmux_gpio *gpios;
-	struct pinmux_cfg_reg *cfg_regs;
-	struct pinmux_data_reg *data_regs;
-
-	pinmux_enum_t *gpio_data;
-	unsigned int gpio_data_size;
-
-	unsigned long *gpio_in_use;
-	struct gpio_chip chip;
-};
-
-int register_pinmux(struct pinmux_info *pip);
-
 #endif /* __ASM_SH_GPIO_H */
diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h
index a5be4afa790b..48b191313a99 100644
--- a/arch/sh/include/asm/hardirq.h
+++ b/arch/sh/include/asm/hardirq.h
@@ -1,9 +1,16 @@
 #ifndef __ASM_SH_HARDIRQ_H
 #define __ASM_SH_HARDIRQ_H
 
-extern void ack_bad_irq(unsigned int irq);
-#define ack_bad_irq ack_bad_irq
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned int __nmi_count;		/* arch dependent */
+} ____cacheline_aligned irq_cpustat_t;
 
-#include <asm-generic/hardirq.h>
+#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+
+extern void ack_bad_irq(unsigned int irq);
 
 #endif /* __ASM_SH_HARDIRQ_H */
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000000..965dd780d51b
--- /dev/null
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -0,0 +1,67 @@
+#ifndef __ASM_SH_HW_BREAKPOINT_H
+#define __ASM_SH_HW_BREAKPOINT_H
+
+#ifdef __KERNEL__
+#define __ARCH_HW_BREAKPOINT_H
+
+#include <linux/kdebug.h>
+#include <linux/types.h>
+
+struct arch_hw_breakpoint {
+	char		*name;	/* Contains name of the symbol to set bkpt */
+	unsigned long	address;
+	u16		len;
+	u16		type;
+};
+
+enum {
+	SH_BREAKPOINT_READ	= (1 << 1),
+	SH_BREAKPOINT_WRITE	= (1 << 2),
+	SH_BREAKPOINT_RW	= SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+	SH_BREAKPOINT_LEN_1	= (1 << 12),
+	SH_BREAKPOINT_LEN_2	= (1 << 13),
+	SH_BREAKPOINT_LEN_4	= SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+	SH_BREAKPOINT_LEN_8	= (1 << 14),
+};
+
+struct sh_ubc {
+	const char	*name;
+	unsigned int	num_events;
+	unsigned int	trap_nr;
+	void		(*enable)(struct arch_hw_breakpoint *, int);
+	void		(*disable)(struct arch_hw_breakpoint *, int);
+	void		(*enable_all)(unsigned long);
+	void		(*disable_all)(void);
+	unsigned long	(*active_mask)(void);
+	unsigned long	(*triggered_mask)(void);
+	void		(*clear_triggered_mask)(unsigned long);
+	struct clk	*clk;	/* optional interface clock / MSTP bit */
+};
+
+struct perf_event;
+struct task_struct;
+struct pmu;
+
+/* Maximum number of UBC channels */
+#define HBP_NUM		2
+
+/* arch/sh/kernel/hw_breakpoint.c */
+extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
+					 struct task_struct *tsk);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					   unsigned long val, void *data);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
+
+extern void arch_fill_perf_breakpoint(struct perf_event *bp);
+extern int register_sh_ubc(struct sh_ubc *);
+
+extern struct pmu perf_ops_bp;
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_HW_BREAKPOINT_H */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 5be45ea4dfec..f689554e17c1 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -22,6 +22,7 @@
  * for old compat code for I/O offseting to SuperIOs, all of which are
  * better handled through the machvec ioport mapping routines these days.
  */
+#include <linux/errno.h>
 #include <asm/cache.h>
 #include <asm/system.h>
 #include <asm/addrspace.h>
@@ -79,28 +80,81 @@
79#define writel(v,a) ({ __raw_writel((v),(a)); mb(); }) 80#define writel(v,a) ({ __raw_writel((v),(a)); mb(); })
80#define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); }) 81#define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); })
81 82
82/* SuperH on-chip I/O functions */ 83/*
83#define ctrl_inb __raw_readb 84 * Legacy SuperH on-chip I/O functions
84#define ctrl_inw __raw_readw 85 *
85#define ctrl_inl __raw_readl 86 * These are all deprecated, all new (and especially cross-platform) code
86#define ctrl_inq __raw_readq 87 * should be using the __raw_xxx() routines directly.
88 */
89static inline u8 __deprecated ctrl_inb(unsigned long addr)
90{
91 return __raw_readb(addr);
92}
93
94static inline u16 __deprecated ctrl_inw(unsigned long addr)
95{
96 return __raw_readw(addr);
97}
98
99static inline u32 __deprecated ctrl_inl(unsigned long addr)
100{
101 return __raw_readl(addr);
102}
103
104static inline u64 __deprecated ctrl_inq(unsigned long addr)
105{
106 return __raw_readq(addr);
107}
108
109static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
110{
111 __raw_writeb(v, addr);
112}
87 113
88#define ctrl_outb __raw_writeb 114static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
89#define ctrl_outw __raw_writew 115{
90#define ctrl_outl __raw_writel 116 __raw_writew(v, addr);
91#define ctrl_outq __raw_writeq 117}
118
119static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
120{
121 __raw_writel(v, addr);
122}
123
124static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
125{
126 __raw_writeq(v, addr);
127}
128
129extern unsigned long generic_io_base;
92 130
93static inline void ctrl_delay(void) 131static inline void ctrl_delay(void)
94{ 132{
95#ifdef CONFIG_CPU_SH4 133 __raw_readw(generic_io_base);
96 __raw_readw(CCN_PVR);
97#elif defined(P2SEG)
98 __raw_readw(P2SEG);
99#else
100#error "Need a dummy address for delay"
101#endif
102} 134}
103 135
136#define __BUILD_UNCACHED_IO(bwlq, type) \
137static inline type read##bwlq##_uncached(unsigned long addr) \
138{ \
139 type ret; \
140 jump_to_uncached(); \
141 ret = __raw_read##bwlq(addr); \
142 back_to_cached(); \
143 return ret; \
144} \
145 \
146static inline void write##bwlq##_uncached(type v, unsigned long addr) \
147{ \
148 jump_to_uncached(); \
149 __raw_write##bwlq(v, addr); \
150 back_to_cached(); \
151}
152
153__BUILD_UNCACHED_IO(b, u8)
154__BUILD_UNCACHED_IO(w, u16)
155__BUILD_UNCACHED_IO(l, u32)
156__BUILD_UNCACHED_IO(q, u64)
157
104#define __BUILD_MEMORY_STRING(bwlq, type) \ 158#define __BUILD_MEMORY_STRING(bwlq, type) \
105 \ 159 \
106static inline void __raw_writes##bwlq(volatile void __iomem *mem, \ 160static inline void __raw_writes##bwlq(volatile void __iomem *mem, \
@@ -186,8 +240,6 @@ __BUILD_MEMORY_STRING(q, u64)
186 240
187#define IO_SPACE_LIMIT 0xffffffff 241#define IO_SPACE_LIMIT 0xffffffff
188 242
189extern unsigned long generic_io_base;
190
191/* 243/*
192 * This function provides a method for the generic case where a 244 * This function provides a method for the generic case where a
193 * board-specific ioport_map simply needs to return the port + some 245 * board-specific ioport_map simply needs to return the port + some
@@ -239,23 +291,22 @@ unsigned long long poke_real_address_q(unsigned long long addr,
239 * doesn't exist, so everything must go through page tables. 291 * doesn't exist, so everything must go through page tables.
240 */ 292 */
241#ifdef CONFIG_MMU 293#ifdef CONFIG_MMU
242void __iomem *__ioremap(unsigned long offset, unsigned long size, 294void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
243 unsigned long flags); 295 pgprot_t prot, void *caller);
244void __iounmap(void __iomem *addr); 296void __iounmap(void __iomem *addr);
245 297
246static inline void __iomem * 298static inline void __iomem *
247__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) 299__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
248{ 300{
249#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) 301 return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
250 unsigned long last_addr = offset + size - 1; 302}
251#endif
252 void __iomem *ret;
253 303
254 ret = __ioremap_trapped(offset, size); 304static inline void __iomem *
255 if (ret) 305__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
256 return ret; 306{
307#ifdef CONFIG_29BIT
308 phys_addr_t last_addr = offset + size - 1;
257 309
258#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
259 /* 310 /*
260 * For P1 and P2 space this is trivial, as everything is already 311 * For P1 and P2 space this is trivial, as everything is already
261 * mapped. Uncached access for P1 addresses are done through P2. 312 * mapped. Uncached access for P1 addresses are done through P2.
@@ -263,7 +314,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
263 * mapping must be done by the PMB or by using page tables. 314 * mapping must be done by the PMB or by using page tables.
264 */ 315 */
265 if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { 316 if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
266 if (unlikely(flags & _PAGE_CACHABLE)) 317 if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
267 return (void __iomem *)P1SEGADDR(offset); 318 return (void __iomem *)P1SEGADDR(offset);
268 319
269 return (void __iomem *)P2SEGADDR(offset); 320 return (void __iomem *)P2SEGADDR(offset);
@@ -274,25 +325,67 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
274 return (void __iomem *)P4SEGADDR(offset); 325 return (void __iomem *)P4SEGADDR(offset);
275#endif 326#endif
276 327
277 return __ioremap(offset, size, flags); 328 return NULL;
329}
330
331static inline void __iomem *
332__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
333{
334 void __iomem *ret;
335
336 ret = __ioremap_trapped(offset, size);
337 if (ret)
338 return ret;
339
340 ret = __ioremap_29bit(offset, size, prot);
341 if (ret)
342 return ret;
343
344 return __ioremap(offset, size, prot);
278} 345}
279#else 346#else
280#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset)) 347#define __ioremap(offset, size, prot) ((void __iomem *)(offset))
348#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
281#define __iounmap(addr) do { } while (0) 349#define __iounmap(addr) do { } while (0)
282#endif /* CONFIG_MMU */ 350#endif /* CONFIG_MMU */
283 351
284#define ioremap(offset, size)				\
285	__ioremap_mode((offset), (size), 0)
286#define ioremap_nocache(offset, size)			\
287	__ioremap_mode((offset), (size), 0)
288#define ioremap_cache(offset, size)			\
289	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
290#define p3_ioremap(offset, size, flags)			\
291	__ioremap((offset), (size), (flags))
292#define ioremap_prot(offset, size, flags)		\
293	__ioremap_mode((offset), (size), (flags))
294#define iounmap(addr)					\
295	__iounmap((addr))

352static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
353{
354	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
355}
356
357static inline void __iomem *
358ioremap_cache(phys_addr_t offset, unsigned long size)
359{
360	return __ioremap_mode(offset, size, PAGE_KERNEL);
361}
362
363#ifdef CONFIG_HAVE_IOREMAP_PROT
364static inline void __iomem *
365ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
366{
367 return __ioremap_mode(offset, size, __pgprot(flags));
368}
369#endif
370
371#ifdef CONFIG_IOREMAP_FIXED
372extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
373extern int iounmap_fixed(void __iomem *);
374extern void ioremap_fixed_init(void);
375#else
376static inline void __iomem *
377ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
378{
379 BUG();
380 return NULL;
381}
382
383static inline void ioremap_fixed_init(void) { }
384static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
385#endif
386
387#define ioremap_nocache ioremap
388#define iounmap __iounmap
296 389
297#define maybebadio(port) \ 390#define maybebadio(port) \
298 printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \ 391 printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
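The io.h changes above fold the old ctrl_*() macros into __deprecated inline wrappers around __raw_*(), route ctrl_delay() through generic_io_base, and turn the ioremap() family into typed inlines over __ioremap_mode(). A minimal driver-side sketch of the new interfaces follows; the device base address and register offset are hypothetical, not taken from this patch, and normal kernel context with <linux/io.h> is assumed.

#include <linux/io.h>

#define DEMO_PHYS_BASE	0xfe200000	/* hypothetical device base */
#define DEMO_CTRL	0x00		/* hypothetical control register */

static void __iomem *demo_base;

static int demo_init(void)
{
	/* ioremap() now maps through PAGE_KERNEL_NOCACHE */
	demo_base = ioremap(DEMO_PHYS_BASE, PAGE_SIZE);
	if (!demo_base)
		return -ENOMEM;

	/* new code uses __raw_writel() rather than the deprecated ctrl_outl() */
	__raw_writel(0x1, demo_base + DEMO_CTRL);

	return 0;
}

static void demo_exit(void)
{
	iounmap(demo_base);
}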
diff --git a/arch/sh/include/asm/irqflags.h b/arch/sh/include/asm/irqflags.h
index 46e71da5be6b..a741153b41c2 100644
--- a/arch/sh/include/asm/irqflags.h
+++ b/arch/sh/include/asm/irqflags.h
@@ -1,34 +1,9 @@
1#ifndef __ASM_SH_IRQFLAGS_H 1#ifndef __ASM_SH_IRQFLAGS_H
2#define __ASM_SH_IRQFLAGS_H 2#define __ASM_SH_IRQFLAGS_H
3 3
4#ifdef CONFIG_SUPERH32
5#include "irqflags_32.h"
6#else
7#include "irqflags_64.h"
8#endif
9
10#define raw_local_save_flags(flags) \
11	do { (flags) = __raw_local_save_flags(); } while (0)
12
13static inline int raw_irqs_disabled_flags(unsigned long flags)
14{
15	return (flags != 0);
16}
17
18static inline int raw_irqs_disabled(void)
19{
20	unsigned long flags = __raw_local_save_flags();
21
22	return raw_irqs_disabled_flags(flags);
23}
24
25#define raw_local_irq_save(flags) \
26	do { (flags) = __raw_local_irq_save(); } while (0)
27
28static inline void raw_local_irq_restore(unsigned long flags)
29{
30	if ((flags & 0xf0) != 0xf0)
31		raw_local_irq_enable();
32}
33

4#define RAW_IRQ_DISABLED	0xf0
5#define RAW_IRQ_ENABLED		0x00
6
7#include <asm-generic/irqflags.h>
8
34#endif /* __ASM_SH_IRQFLAGS_H */ 9#endif /* __ASM_SH_IRQFLAGS_H */
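With the arch-specific helpers gone, irqflags.h now only provides the two SR.IMASK values and lets asm-generic/irqflags.h build the raw_local_irq_*() layer on top of them. Consumers are unaffected; a typical critical section still looks like the sketch below (illustrative only).

#include <linux/irqflags.h>

static void demo_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* SR.IMASK raised to RAW_IRQ_DISABLED (0xf0) */
	/* ... work that must not be interrupted ... */
	local_irq_restore(flags);
}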
diff --git a/arch/sh/include/asm/irqflags_32.h b/arch/sh/include/asm/irqflags_32.h
deleted file mode 100644
index 60218f541340..000000000000
--- a/arch/sh/include/asm/irqflags_32.h
+++ /dev/null
@@ -1,99 +0,0 @@
1#ifndef __ASM_SH_IRQFLAGS_32_H
2#define __ASM_SH_IRQFLAGS_32_H
3
4static inline void raw_local_irq_enable(void)
5{
6 unsigned long __dummy0, __dummy1;
7
8 __asm__ __volatile__ (
9 "stc sr, %0\n\t"
10 "and %1, %0\n\t"
11#ifdef CONFIG_CPU_HAS_SR_RB
12 "stc r6_bank, %1\n\t"
13 "or %1, %0\n\t"
14#endif
15 "ldc %0, sr\n\t"
16 : "=&r" (__dummy0), "=r" (__dummy1)
17 : "1" (~0x000000f0)
18 : "memory"
19 );
20}
21
22static inline void raw_local_irq_disable(void)
23{
24 unsigned long flags;
25
26 __asm__ __volatile__ (
27 "stc sr, %0\n\t"
28 "or #0xf0, %0\n\t"
29 "ldc %0, sr\n\t"
30 : "=&z" (flags)
31 : /* no inputs */
32 : "memory"
33 );
34}
35
36static inline void set_bl_bit(void)
37{
38 unsigned long __dummy0, __dummy1;
39
40 __asm__ __volatile__ (
41 "stc sr, %0\n\t"
42 "or %2, %0\n\t"
43 "and %3, %0\n\t"
44 "ldc %0, sr\n\t"
45 : "=&r" (__dummy0), "=r" (__dummy1)
46 : "r" (0x10000000), "r" (0xffffff0f)
47 : "memory"
48 );
49}
50
51static inline void clear_bl_bit(void)
52{
53 unsigned long __dummy0, __dummy1;
54
55 __asm__ __volatile__ (
56 "stc sr, %0\n\t"
57 "and %2, %0\n\t"
58 "ldc %0, sr\n\t"
59 : "=&r" (__dummy0), "=r" (__dummy1)
60 : "1" (~0x10000000)
61 : "memory"
62 );
63}
64
65static inline unsigned long __raw_local_save_flags(void)
66{
67 unsigned long flags;
68
69 __asm__ __volatile__ (
70 "stc sr, %0\n\t"
71 "and #0xf0, %0\n\t"
72 : "=&z" (flags)
73 : /* no inputs */
74 : "memory"
75 );
76
77 return flags;
78}
79
80static inline unsigned long __raw_local_irq_save(void)
81{
82 unsigned long flags, __dummy;
83
84 __asm__ __volatile__ (
85 "stc sr, %1\n\t"
86 "mov %1, %0\n\t"
87 "or #0xf0, %0\n\t"
88 "ldc %0, sr\n\t"
89 "mov %1, %0\n\t"
90 "and #0xf0, %0\n\t"
91 : "=&z" (flags), "=&r" (__dummy)
92 : /* no inputs */
93 : "memory"
94 );
95
96 return flags;
97}
98
99#endif /* __ASM_SH_IRQFLAGS_32_H */
diff --git a/arch/sh/include/asm/irqflags_64.h b/arch/sh/include/asm/irqflags_64.h
deleted file mode 100644
index 88f65222c1d4..000000000000
--- a/arch/sh/include/asm/irqflags_64.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef __ASM_SH_IRQFLAGS_64_H
2#define __ASM_SH_IRQFLAGS_64_H
3
4#include <cpu/registers.h>
5
6#define SR_MASK_LL 0x00000000000000f0LL
7#define SR_BL_LL 0x0000000010000000LL
8
9static inline void raw_local_irq_enable(void)
10{
11 unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL;
12
13 __asm__ __volatile__("getcon " __SR ", %0\n\t"
14 "and %0, %1, %0\n\t"
15 "putcon %0, " __SR "\n\t"
16 : "=&r" (__dummy0)
17 : "r" (__dummy1));
18}
19
20static inline void raw_local_irq_disable(void)
21{
22 unsigned long long __dummy0, __dummy1 = SR_MASK_LL;
23
24 __asm__ __volatile__("getcon " __SR ", %0\n\t"
25 "or %0, %1, %0\n\t"
26 "putcon %0, " __SR "\n\t"
27 : "=&r" (__dummy0)
28 : "r" (__dummy1));
29}
30
31static inline void set_bl_bit(void)
32{
33 unsigned long long __dummy0, __dummy1 = SR_BL_LL;
34
35 __asm__ __volatile__("getcon " __SR ", %0\n\t"
36 "or %0, %1, %0\n\t"
37 "putcon %0, " __SR "\n\t"
38 : "=&r" (__dummy0)
39 : "r" (__dummy1));
40
41}
42
43static inline void clear_bl_bit(void)
44{
45 unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
46
47 __asm__ __volatile__("getcon " __SR ", %0\n\t"
48 "and %0, %1, %0\n\t"
49 "putcon %0, " __SR "\n\t"
50 : "=&r" (__dummy0)
51 : "r" (__dummy1));
52}
53
54static inline unsigned long __raw_local_save_flags(void)
55{
56 unsigned long long __dummy = SR_MASK_LL;
57 unsigned long flags;
58
59 __asm__ __volatile__ (
60 "getcon " __SR ", %0\n\t"
61 "and %0, %1, %0"
62 : "=&r" (flags)
63 : "r" (__dummy));
64
65 return flags;
66}
67
68static inline unsigned long __raw_local_irq_save(void)
69{
70 unsigned long long __dummy0, __dummy1 = SR_MASK_LL;
71 unsigned long flags;
72
73 __asm__ __volatile__ (
74 "getcon " __SR ", %1\n\t"
75 "or %1, r63, %0\n\t"
76 "or %1, %2, %1\n\t"
77 "putcon %1, " __SR "\n\t"
78 "and %0, %2, %0"
79 : "=&r" (flags), "=&r" (__dummy0)
80 : "r" (__dummy1));
81
82 return flags;
83}
84
85#endif /* __ASM_SH_IRQFLAGS_64_H */
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index 985219f9759e..5f6d2e9ccb7c 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -6,6 +6,8 @@ enum die_val {
6 DIE_TRAP, 6 DIE_TRAP,
7 DIE_NMI, 7 DIE_NMI,
8 DIE_OOPS, 8 DIE_OOPS,
9 DIE_BREAKPOINT,
10 DIE_SSTEP,
9}; 11};
10 12
11#endif /* __ASM_SH_KDEBUG_H */ 13#endif /* __ASM_SH_KDEBUG_H */
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index 84dd37761f56..9c30955630ff 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -12,7 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/time.h> 14#include <linux/time.h>
15#include <asm/machtypes.h> 15#include <generated/machtypes.h>
16 16
17struct sh_machine_vector { 17struct sh_machine_vector {
18 void (*mv_setup)(char **cmdline_p); 18 void (*mv_setup)(char **cmdline_p);
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index f5963037c9d6..56e4418c19b9 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -7,12 +7,18 @@
7#define PMB_PASCR 0xff000070 7#define PMB_PASCR 0xff000070
8#define PMB_IRMCR 0xff000078 8#define PMB_IRMCR 0xff000078
9 9
10#define PASCR_SE 0x80000000
11
10#define PMB_ADDR 0xf6100000 12#define PMB_ADDR 0xf6100000
11#define PMB_DATA 0xf7100000 13#define PMB_DATA 0xf7100000
12#define PMB_ENTRY_MAX 16 14
15#define NR_PMB_ENTRIES 16
16
13#define PMB_E_MASK 0x0000000f 17#define PMB_E_MASK 0x0000000f
14#define PMB_E_SHIFT 8 18#define PMB_E_SHIFT 8
15 19
20#define PMB_PFN_MASK 0xff000000
21
16#define PMB_SZ_16M 0x00000000 22#define PMB_SZ_16M 0x00000000
17#define PMB_SZ_64M 0x00000010 23#define PMB_SZ_64M 0x00000010
18#define PMB_SZ_128M 0x00000080 24#define PMB_SZ_128M 0x00000080
@@ -21,11 +27,15 @@
21#define PMB_C 0x00000008 27#define PMB_C 0x00000008
22#define PMB_WT 0x00000001 28#define PMB_WT 0x00000001
23#define PMB_UB 0x00000200 29#define PMB_UB 0x00000200
30#define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB)
24#define PMB_V 0x00000100 31#define PMB_V 0x00000100
25 32
26#define PMB_NO_ENTRY (-1) 33#define PMB_NO_ENTRY (-1)
27 34
28#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36#include <linux/errno.h>
37#include <linux/threads.h>
38#include <asm/page.h>
29 39
30/* Default "unsigned long" context */ 40/* Default "unsigned long" context */
31typedef unsigned long mm_context_id_t[NR_CPUS]; 41typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -43,36 +53,54 @@ typedef struct {
43#endif 53#endif
44} mm_context_t; 54} mm_context_t;
45 55
46struct pmb_entry; 56#ifdef CONFIG_PMB
57/* arch/sh/mm/pmb.c */
58bool __in_29bit_mode(void);
59
60void pmb_init(void);
61int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
62 unsigned long size, pgprot_t prot);
63void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
64 pgprot_t prot, void *caller);
65int pmb_unmap(void __iomem *addr);
47 66
48struct pmb_entry { 67#else
49 unsigned long vpn;
50 unsigned long ppn;
51 unsigned long flags;
52 68
53 /* 69static inline int
54 * 0 .. NR_PMB_ENTRIES for specific entry selection, or 70pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
55 * PMB_NO_ENTRY to search for a free one 71 unsigned long size, pgprot_t prot)
56 */ 72{
57 int entry; 73 return -EINVAL;
74}
58 75
59 struct pmb_entry *next; 76static inline void __iomem *
60 /* Adjacent entry link for contiguous multi-entry mappings */ 77pmb_remap_caller(phys_addr_t phys, unsigned long size,
61 struct pmb_entry *link; 78 pgprot_t prot, void *caller)
62}; 79{
80 return NULL;
81}
82
83static inline int pmb_unmap(void __iomem *addr)
84{
85 return -EINVAL;
86}
87
88#define pmb_init(addr) do { } while (0)
89
90#ifdef CONFIG_29BIT
91#define __in_29bit_mode() (1)
92#else
93#define __in_29bit_mode() (0)
94#endif
95
96#endif /* CONFIG_PMB */
97
98static inline void __iomem *
99pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot)
100{
101 return pmb_remap_caller(phys, size, prot, __builtin_return_address(0));
102}
63 103
64/* arch/sh/mm/pmb.c */
65int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
66 unsigned long flags, int *entry);
67int set_pmb_entry(struct pmb_entry *pmbe);
68void clear_pmb_entry(struct pmb_entry *pmbe);
69struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
70 unsigned long flags);
71void pmb_free(struct pmb_entry *pmbe);
72long pmb_remap(unsigned long virt, unsigned long phys,
73 unsigned long size, unsigned long flags);
74void pmb_unmap(unsigned long addr);
75#endif /* __ASSEMBLY__ */ 104#endif /* __ASSEMBLY__ */
76 105
77#endif /* __MMU_H */ 106#endif /* __MMU_H */
78
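The mmu.h rework replaces the exported struct pmb_entry plumbing with a small bolt/remap/unmap API that degrades to stubs when CONFIG_PMB is not set. A hedged sketch of a caller; the physical address and size are made up, and SZ_16M and PAGE_KERNEL_NOCACHE are assumed to be available in the surrounding kernel context.

static void __iomem *demo_map_flash(void)
{
	/* Map a 16MB uncached window over hypothetical flash at 0x08000000 */
	return pmb_remap(0x08000000, SZ_16M, PAGE_KERNEL_NOCACHE);
}

static void demo_unmap_flash(void __iomem *addr)
{
	if (addr)
		pmb_unmap(addr);
}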
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 41080b173a7a..384c7471a374 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -158,7 +158,7 @@ static inline void enable_mmu(void)
158 unsigned int cpu = smp_processor_id(); 158 unsigned int cpu = smp_processor_id();
159 159
160 /* Enable MMU */ 160 /* Enable MMU */
161 ctrl_outl(MMU_CONTROL_INIT, MMUCR); 161 __raw_writel(MMU_CONTROL_INIT, MMUCR);
162 ctrl_barrier(); 162 ctrl_barrier();
163 163
164 if (asid_cache(cpu) == NO_CONTEXT) 164 if (asid_cache(cpu) == NO_CONTEXT)
@@ -171,9 +171,9 @@ static inline void disable_mmu(void)
171{ 171{
172 unsigned long cr; 172 unsigned long cr;
173 173
174 cr = ctrl_inl(MMUCR); 174 cr = __raw_readl(MMUCR);
175 cr &= ~MMU_CONTROL_INIT; 175 cr &= ~MMU_CONTROL_INIT;
176 ctrl_outl(cr, MMUCR); 176 __raw_writel(cr, MMUCR);
177 177
178 ctrl_barrier(); 178 ctrl_barrier();
179} 179}
diff --git a/arch/sh/include/asm/mmu_context_32.h b/arch/sh/include/asm/mmu_context_32.h
index 8ef800c549ab..10e2e17210d2 100644
--- a/arch/sh/include/asm/mmu_context_32.h
+++ b/arch/sh/include/asm/mmu_context_32.h
@@ -49,11 +49,11 @@ static inline unsigned long get_asid(void)
49/* MMU_TTB is used for optimizing the fault handling. */ 49/* MMU_TTB is used for optimizing the fault handling. */
50static inline void set_TTB(pgd_t *pgd) 50static inline void set_TTB(pgd_t *pgd)
51{ 51{
52 ctrl_outl((unsigned long)pgd, MMU_TTB); 52 __raw_writel((unsigned long)pgd, MMU_TTB);
53} 53}
54 54
55static inline pgd_t *get_TTB(void) 55static inline pgd_t *get_TTB(void)
56{ 56{
57 return (pgd_t *)ctrl_inl(MMU_TTB); 57 return (pgd_t *)__raw_readl(MMU_TTB);
58} 58}
59#endif /* __ASM_SH_MMU_CONTEXT_32_H */ 59#endif /* __ASM_SH_MMU_CONTEXT_32_H */
diff --git a/arch/sh/include/asm/module.h b/arch/sh/include/asm/module.h
index 068bf1659750..b7927de86f9f 100644
--- a/arch/sh/include/asm/module.h
+++ b/arch/sh/include/asm/module.h
@@ -1,7 +1,22 @@
1#ifndef _ASM_SH_MODULE_H 1#ifndef _ASM_SH_MODULE_H
2#define _ASM_SH_MODULE_H 2#define _ASM_SH_MODULE_H
3 3
4#include <asm-generic/module.h> 4struct mod_arch_specific {
5#ifdef CONFIG_DWARF_UNWINDER
6 struct list_head fde_list;
7 struct list_head cie_list;
8#endif
9};
10
11#ifdef CONFIG_64BIT
12#define Elf_Shdr Elf64_Shdr
13#define Elf_Sym Elf64_Sym
14#define Elf_Ehdr Elf64_Ehdr
15#else
16#define Elf_Shdr Elf32_Shdr
17#define Elf_Sym Elf32_Sym
18#define Elf_Ehdr Elf32_Ehdr
19#endif
5 20
6#ifdef CONFIG_CPU_LITTLE_ENDIAN 21#ifdef CONFIG_CPU_LITTLE_ENDIAN
7# ifdef CONFIG_CPU_SH2 22# ifdef CONFIG_CPU_SH2
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 81bffc0d6860..d71feb359304 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -45,6 +45,7 @@
45#endif 45#endif
46 46
47#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
48#include <asm/uncached.h>
48 49
49extern unsigned long shm_align_mask; 50extern unsigned long shm_align_mask;
50extern unsigned long max_low_pfn, min_low_pfn; 51extern unsigned long max_low_pfn, min_low_pfn;
@@ -56,7 +57,6 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
56 return (addr1 ^ addr2) & shm_align_mask; 57 return (addr1 ^ addr2) & shm_align_mask;
57} 58}
58 59
59
60#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) 60#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
61extern void copy_page(void *to, void *from); 61extern void copy_page(void *to, void *from);
62 62
@@ -88,7 +88,7 @@ typedef struct { unsigned long pgd; } pgd_t;
88#define __pte(x) ((pte_t) { (x) } ) 88#define __pte(x) ((pte_t) { (x) } )
89#else 89#else
90typedef struct { unsigned long long pte_low; } pte_t; 90typedef struct { unsigned long long pte_low; } pte_t;
91typedef struct { unsigned long pgprot; } pgprot_t; 91typedef struct { unsigned long long pgprot; } pgprot_t;
92typedef struct { unsigned long pgd; } pgd_t; 92typedef struct { unsigned long pgd; } pgd_t;
93#define pte_val(x) ((x).pte_low) 93#define pte_val(x) ((x).pte_low)
94#define __pte(x) ((pte_t) { (x) } ) 94#define __pte(x) ((pte_t) { (x) } )
@@ -127,12 +127,7 @@ typedef struct page *pgtable_t;
127 * is not visible (it is part of the PMB mapping) and so needs to be 127 * is not visible (it is part of the PMB mapping) and so needs to be
128 * added or subtracted as required. 128 * added or subtracted as required.
129 */ 129 */
130#if defined(CONFIG_PMB_FIXED) 130#ifdef CONFIG_PMB
131/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
132#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
133#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
134#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET))
135#elif defined(CONFIG_32BIT)
136#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START) 131#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
137#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START)) 132#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
138#else 133#else
@@ -140,6 +135,14 @@ typedef struct page *pgtable_t;
140#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) 135#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
141#endif 136#endif
142 137
138#ifdef CONFIG_UNCACHED_MAPPING
139#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
140#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
141#else
142#define UNCAC_ADDR(addr) ((addr))
143#define CAC_ADDR(addr) ((addr))
144#endif
145
143#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 146#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
144#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 147#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
145 148
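The new UNCAC_ADDR()/CAC_ADDR() helpers translate between the cached identity mapping and the uncached alias set up under CONFIG_UNCACHED_MAPPING, and collapse to identity otherwise. A small illustrative sketch, with 'buf' standing in for any lowmem kernel buffer:

static unsigned long demo_uncached_alias(void *buf)
{
	unsigned long cached = (unsigned long)buf;
	unsigned long uncached = UNCAC_ADDR(cached);

	/* ... touch the data through the uncached alias ... */

	return CAC_ADDR(uncached);	/* back to the cached address */
}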
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 4163950cd1c6..8bd952fcf3ba 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -3,8 +3,6 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <linux/dma-mapping.h>
7
8/* Can be used to override the logic in pci_scan_bus for skipping 6/* Can be used to override the logic in pci_scan_bus for skipping
9 already-configured bus numbers - to be used for buggy BIOSes 7 already-configured bus numbers - to be used for buggy BIOSes
10 or architectures with incomplete PCI setup by the loader */ 8 or architectures with incomplete PCI setup by the loader */
@@ -17,20 +15,49 @@
17 */ 15 */
18struct pci_channel { 16struct pci_channel {
19 struct pci_channel *next; 17 struct pci_channel *next;
18 struct pci_bus *bus;
20 19
21 struct pci_ops *pci_ops; 20 struct pci_ops *pci_ops;
22 struct resource *io_resource; 21
23 struct resource *mem_resource; 22 struct resource *resources;
23 unsigned int nr_resources;
24 24
25 unsigned long io_offset; 25 unsigned long io_offset;
26 unsigned long mem_offset; 26 unsigned long mem_offset;
27 27
28 unsigned long reg_base; 28 unsigned long reg_base;
29
30 unsigned long io_map_base; 29 unsigned long io_map_base;
30
31 unsigned int index;
32 unsigned int need_domain_info;
33
34 /* Optional error handling */
35 struct timer_list err_timer, serr_timer;
36 unsigned int err_irq, serr_irq;
31}; 37};
32 38
33extern void register_pci_controller(struct pci_channel *hose); 39/* arch/sh/drivers/pci/pci.c */
40extern int register_pci_controller(struct pci_channel *hose);
41extern void pcibios_report_status(unsigned int status_mask, int warn);
42
43/* arch/sh/drivers/pci/common.c */
44extern int early_read_config_byte(struct pci_channel *hose, int top_bus,
45 int bus, int devfn, int offset, u8 *value);
46extern int early_read_config_word(struct pci_channel *hose, int top_bus,
47 int bus, int devfn, int offset, u16 *value);
48extern int early_read_config_dword(struct pci_channel *hose, int top_bus,
49 int bus, int devfn, int offset, u32 *value);
50extern int early_write_config_byte(struct pci_channel *hose, int top_bus,
51 int bus, int devfn, int offset, u8 value);
52extern int early_write_config_word(struct pci_channel *hose, int top_bus,
53 int bus, int devfn, int offset, u16 value);
54extern int early_write_config_dword(struct pci_channel *hose, int top_bus,
55 int bus, int devfn, int offset, u32 value);
56extern void pcibios_enable_timers(struct pci_channel *hose);
57extern unsigned int pcibios_handle_status_errors(unsigned long addr,
58 unsigned int status, struct pci_channel *hose);
59extern int pci_is_66mhz_capable(struct pci_channel *hose,
60 int top_bus, int current_bus);
34 61
35extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM; 62extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
36 63
@@ -54,38 +81,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
54 * address space. The networking and block device layers use 81 * address space. The networking and block device layers use
55 * this boolean for bounce buffer decisions. 82 * this boolean for bounce buffer decisions.
56 */ 83 */
57#define PCI_DMA_BUS_IS_PHYS (1) 84#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
58
59#include <linux/types.h>
60#include <linux/slab.h>
61#include <asm/scatterlist.h>
62#include <linux/string.h>
63#include <asm/io.h>
64
65/* pci_unmap_{single,page} being a nop depends upon the
66 * configuration.
67 */
68#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
69#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
70 dma_addr_t ADDR_NAME;
71#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
72 __u32 LEN_NAME;
73#define pci_unmap_addr(PTR, ADDR_NAME) \
74 ((PTR)->ADDR_NAME)
75#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
76 (((PTR)->ADDR_NAME) = (VAL))
77#define pci_unmap_len(PTR, LEN_NAME) \
78 ((PTR)->LEN_NAME)
79#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
80 (((PTR)->LEN_NAME) = (VAL))
81#else
82#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
83#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
84#define pci_unmap_addr(PTR, ADDR_NAME) (0)
85#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
86#define pci_unmap_len(PTR, LEN_NAME) (0)
87#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
88#endif
89 85
90#ifdef CONFIG_PCI 86#ifdef CONFIG_PCI
91/* 87/*
@@ -113,20 +109,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
113} 109}
114#endif 110#endif
115 111
116#ifdef CONFIG_SUPERH32
117/*
118 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is mapped
119 * at the end of the address space in a special non-translatable area.
120 */
121#define PCI_MEM_FIXED_START 0xfd000000
122#define PCI_MEM_FIXED_END (PCI_MEM_FIXED_START + 0x01000000)
123
124#define is_pci_memory_fixed_range(s, e) \
125 ((s) >= PCI_MEM_FIXED_START && (e) < PCI_MEM_FIXED_END)
126#else
127#define is_pci_memory_fixed_range(s, e) (0)
128#endif
129
130/* Board-specific fixup routines. */ 112/* Board-specific fixup routines. */
131int pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin); 113int pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin);
132 114
@@ -136,6 +118,14 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev,
136extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, 118extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
137 struct pci_bus_region *region); 119 struct pci_bus_region *region);
138 120
121#define pci_domain_nr(bus) ((struct pci_channel *)(bus)->sysdata)->index
122
123static inline int pci_proc_domain(struct pci_bus *bus)
124{
125 struct pci_channel *hose = bus->sysdata;
126 return hose->need_domain_info;
127}
128
139/* Chances are this interrupt is wired PC-style ... */ 129/* Chances are this interrupt is wired PC-style ... */
140static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 130static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
141{ 131{
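The pci_channel rework swaps the io_resource/mem_resource pair for a resource table and makes register_pci_controller() return an error code. A hypothetical board-code sketch; the addresses, sizes, and demo_pci_ops are assumptions for illustration, not values from this patch.

static struct resource demo_pci_resources[] = {
	{
		.name	= "PCI IO",
		.start	= 0x1000,
		.end	= 0x1000 + SZ_64K - 1,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "PCI MEM",
		.start	= 0xfd000000,
		.end	= 0xfd000000 + SZ_16M - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct pci_channel demo_pci_controller = {
	.pci_ops	= &demo_pci_ops,	/* assumed to be defined elsewhere */
	.resources	= demo_pci_resources,
	.nr_resources	= ARRAY_SIZE(demo_pci_resources),
	.io_map_base	= 0xfe240000,
};

static int __init demo_pcic_init(void)
{
	return register_pci_controller(&demo_pci_controller);
}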
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h
index 11a302297ab7..3d0c9f36d150 100644
--- a/arch/sh/include/asm/perf_event.h
+++ b/arch/sh/include/asm/perf_event.h
@@ -1,8 +1,35 @@
1#ifndef __ASM_SH_PERF_EVENT_H 1#ifndef __ASM_SH_PERF_EVENT_H
2#define __ASM_SH_PERF_EVENT_H 2#define __ASM_SH_PERF_EVENT_H
3 3
4/* SH only supports software events through this interface. */ 4struct hw_perf_event;
5static inline void set_perf_event_pending(void) {} 5
6#define MAX_HWEVENTS 2
7
8struct sh_pmu {
9 const char *name;
10 unsigned int num_events;
11 void (*disable_all)(void);
12 void (*enable_all)(void);
13 void (*enable)(struct hw_perf_event *, int);
14 void (*disable)(struct hw_perf_event *, int);
15 u64 (*read)(int);
16 int (*event_map)(int);
17 unsigned int max_events;
18 unsigned long raw_event_mask;
19 const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
20 [PERF_COUNT_HW_CACHE_OP_MAX]
21 [PERF_COUNT_HW_CACHE_RESULT_MAX];
22};
23
24/* arch/sh/kernel/perf_event.c */
25extern int register_sh_pmu(struct sh_pmu *);
26extern int reserve_pmc_hardware(void);
27extern void release_pmc_hardware(void);
28
29static inline void set_perf_event_pending(void)
30{
31 /* Nothing to see here, move along. */
32}
6 33
7#define PERF_EVENT_INDEX_OFFSET 0 34#define PERF_EVENT_INDEX_OFFSET 0
8 35
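perf_event.h now describes a hardware PMU through struct sh_pmu instead of being software-only. A minimal registration sketch; every demo_* callback is assumed to exist in CPU-specific code and is not part of this patch.

static struct sh_pmu demo_pmu = {
	.name		= "demo",
	.num_events	= 2,
	.event_map	= demo_event_map,
	.enable_all	= demo_pmu_enable_all,
	.disable_all	= demo_pmu_disable_all,
	.enable		= demo_pmu_enable,
	.disable	= demo_pmu_disable,
	.read		= demo_pmu_read,
	.max_events	= 0x3ff,
	.raw_event_mask	= 0x3ff,
};

static int __init demo_pmu_setup(void)
{
	return register_sh_pmu(&demo_pmu);
}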
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 63ca37bd9a95..8c00785c60d5 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -4,8 +4,16 @@
4#include <linux/quicklist.h> 4#include <linux/quicklist.h>
5#include <asm/page.h> 5#include <asm/page.h>
6 6
7#define QUICK_PGD 0 /* We preserve special mappings over free */ 7#define QUICK_PT 0 /* Other page table pages that are zero on free */
8#define QUICK_PT 1 /* Other page table pages that are zero on free */ 8
9extern pgd_t *pgd_alloc(struct mm_struct *);
10extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
11
12#if PAGETABLE_LEVELS > 2
13extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
15extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
16#endif
9 17
10static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, 18static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
11 pte_t *pte) 19 pte_t *pte)
@@ -20,28 +28,9 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
20} 28}
21#define pmd_pgtable(pmd) pmd_page(pmd) 29#define pmd_pgtable(pmd) pmd_page(pmd)
22 30
23static inline void pgd_ctor(void *x)
24{
25 pgd_t *pgd = x;
26
27 memcpy(pgd + USER_PTRS_PER_PGD,
28 swapper_pg_dir + USER_PTRS_PER_PGD,
29 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
30}
31
32/* 31/*
33 * Allocate and free page tables. 32 * Allocate and free page tables.
34 */ 33 */
35static inline pgd_t *pgd_alloc(struct mm_struct *mm)
36{
37 return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
38}
39
40static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
41{
42 quicklist_free(QUICK_PGD, NULL, pgd);
43}
44
45static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 34static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
46 unsigned long address) 35 unsigned long address)
47{ 36{
@@ -81,7 +70,6 @@ do { \
81 70
82static inline void check_pgt_cache(void) 71static inline void check_pgt_cache(void)
83{ 72{
84 quicklist_trim(QUICK_PGD, NULL, 25, 16);
85 quicklist_trim(QUICK_PT, NULL, 25, 16); 73 quicklist_trim(QUICK_PT, NULL, 25, 16);
86} 74}
87 75
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h
new file mode 100644
index 000000000000..19bd89db17e7
--- /dev/null
+++ b/arch/sh/include/asm/pgtable-2level.h
@@ -0,0 +1,23 @@
1#ifndef __ASM_SH_PGTABLE_2LEVEL_H
2#define __ASM_SH_PGTABLE_2LEVEL_H
3
4#include <asm-generic/pgtable-nopmd.h>
5
6/*
7 * traditional two-level paging structure
8 */
9#define PAGETABLE_LEVELS 2
10
11/* PTE bits */
12#define PTE_MAGNITUDE 2 /* 32-bit PTEs */
13
14#define PTE_SHIFT PAGE_SHIFT
15#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE)
16
17/* PGD bits */
18#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
19
20#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE))
21#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
22
23#endif /* __ASM_SH_PGTABLE_2LEVEL_H */
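For reference, with the usual 4KiB pages these definitions work out as follows (plain arithmetic, not part of the header):

PTE_SHIFT    = PAGE_SHIFT   = 12
PTE_BITS     = 12 - 2       = 10    (1024 PTEs per page table)
PGDIR_SHIFT  = 12 + 10      = 22    (each pgd entry covers 4MiB)
PTRS_PER_PGD = 4096 / 4     = 1024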
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
new file mode 100644
index 000000000000..249a985d9648
--- /dev/null
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -0,0 +1,56 @@
1#ifndef __ASM_SH_PGTABLE_3LEVEL_H
2#define __ASM_SH_PGTABLE_3LEVEL_H
3
4#include <asm-generic/pgtable-nopud.h>
5
6/*
7 * Some cores need a 3-level page table layout, for example when using
8 * 64-bit PTEs and 4K pages.
9 */
10#define PAGETABLE_LEVELS 3
11
12#define PTE_MAGNITUDE 3 /* 64-bit PTEs on SH-X2 TLB */
13
14/* PGD bits */
15#define PGDIR_SHIFT 30
16
17#define PTRS_PER_PGD 4
18#define USER_PTRS_PER_PGD 2
19
20/* PMD bits */
21#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))
22#define PMD_SIZE (1UL << PMD_SHIFT)
23#define PMD_MASK (~(PMD_SIZE-1))
24
25#define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
26
27#define pmd_ERROR(e) \
28 printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
29
30typedef struct { unsigned long long pmd; } pmd_t;
31#define pmd_val(x) ((x).pmd)
32#define __pmd(x) ((pmd_t) { (x) } )
33
34static inline unsigned long pud_page_vaddr(pud_t pud)
35{
36 return pud_val(pud);
37}
38
39#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
40static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
41{
42 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
43}
44
45#define pud_none(x) (!pud_val(x))
46#define pud_present(x) (pud_val(x))
47#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0)
48#define pud_bad(x) (pud_val(x) & ~PAGE_MASK)
49
50/*
51 * (puds are folded into pgds so this doesn't get actually called,
52 * but the define is needed for a generic inline function.)
53 */
54#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
55
56#endif /* __ASM_SH_PGTABLE_3LEVEL_H */
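Again assuming 4KiB pages, the 3-level layout works out to (plain arithmetic, not part of the header):

PTE_MAGNITUDE = 3   (64-bit PTEs)
PMD_SHIFT     = 12 + (12 - 3) = 21    so PMD_SIZE = 2MiB
PTRS_PER_PMD  = 2^30 / 2^21   = 512
PGDIR_SHIFT   = 30   (each of the 4 pgd entries covers 1GiB, 2 of them for userspace)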
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 4f3efa7d5a64..02f77450cd8f 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -12,7 +12,11 @@
12#ifndef __ASM_SH_PGTABLE_H 12#ifndef __ASM_SH_PGTABLE_H
13#define __ASM_SH_PGTABLE_H 13#define __ASM_SH_PGTABLE_H
14 14
15#include <asm-generic/pgtable-nopmd.h> 15#ifdef CONFIG_X2TLB
16#include <asm/pgtable-3level.h>
17#else
18#include <asm/pgtable-2level.h>
19#endif
16#include <asm/page.h> 20#include <asm/page.h>
17 21
18#ifndef __ASSEMBLY__ 22#ifndef __ASSEMBLY__
@@ -51,37 +55,39 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
51#define NPHYS_SIGN (1LL << (NPHYS - 1)) 55#define NPHYS_SIGN (1LL << (NPHYS - 1))
52#define NPHYS_MASK (-1LL << NPHYS) 56#define NPHYS_MASK (-1LL << NPHYS)
53 57
54/*
55 * traditional two-level paging structure
56 */
57/* PTE bits */
58#if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64)
59# define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */
60#else
61# define PTE_MAGNITUDE 2 /* 32-bit PTEs */
62#endif
63#define PTE_SHIFT PAGE_SHIFT
64#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE)
65
66/* PGD bits */
67#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
68#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 58#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
69#define PGDIR_MASK (~(PGDIR_SIZE-1)) 59#define PGDIR_MASK (~(PGDIR_SIZE-1))
70 60
71/* Entries per level */ 61/* Entries per level */
72#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE)) 62#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
73#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
74 63
75#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
76#define FIRST_USER_ADDRESS 0 64#define FIRST_USER_ADDRESS 0
77 65
78#ifdef CONFIG_32BIT 66#define PHYS_ADDR_MASK29 0x1fffffff
79#define PHYS_ADDR_MASK 0xffffffff 67#define PHYS_ADDR_MASK32 0xffffffff
68
69#ifdef CONFIG_PMB
70static inline unsigned long phys_addr_mask(void)
71{
72 /* Is the MMU in 29bit mode? */
73 if (__in_29bit_mode())
74 return PHYS_ADDR_MASK29;
75
76 return PHYS_ADDR_MASK32;
77}
78#elif defined(CONFIG_32BIT)
79static inline unsigned long phys_addr_mask(void)
80{
81 return PHYS_ADDR_MASK32;
82}
80#else 83#else
81#define PHYS_ADDR_MASK 0x1fffffff 84static inline unsigned long phys_addr_mask(void)
85{
86 return PHYS_ADDR_MASK29;
87}
82#endif 88#endif
83 89
84#define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK) 90#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
85#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) 91#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
86 92
87#ifdef CONFIG_SUPERH32 93#ifdef CONFIG_SUPERH32
@@ -135,9 +141,9 @@ typedef pte_t *pte_addr_t;
135#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) 141#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
136 142
137/* 143/*
138 * No page table caches to initialise 144 * Initialise the page table caches
139 */ 145 */
140#define pgtable_cache_init() do { } while (0) 146extern void pgtable_cache_init(void);
141 147
142struct vm_area_struct; 148struct vm_area_struct;
143 149
@@ -147,8 +153,9 @@ extern void __update_tlb(struct vm_area_struct *vma,
147 unsigned long address, pte_t pte); 153 unsigned long address, pte_t pte);
148 154
149static inline void 155static inline void
150update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) 156update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
151{ 157{
158 pte_t pte = *ptep;
152 __update_cache(vma, address, pte); 159 __update_cache(vma, address, pte);
153 __update_tlb(vma, address, pte); 160 __update_tlb(vma, address, pte);
154} 161}
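phys_addr_mask() now selects the 29-bit or 32-bit physical mask at runtime when PMB support is built in, and PTE_PHYS_MASK follows it. An illustrative helper showing what the mask is for (sketch only, mirroring the PTE_PHYS_MASK definition above):

static inline phys_addr_t demo_pte_to_phys(pte_t pte)
{
	return pte_val(pte) & phys_addr_mask() & PAGE_MASK;
}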
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index c0d359ce337b..e172d696e52b 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -71,6 +71,8 @@
71#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */ 71#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */
72#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */ 72#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */
73 73
74#define _PAGE_EXT_WIRED 0x4000 /* software: Wire TLB entry */
75
74/* Wrapper for extended mode pgprot twiddling */ 76/* Wrapper for extended mode pgprot twiddling */
75#define _PAGE_EXT(x) ((unsigned long long)(x) << 32) 77#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
76 78
@@ -108,7 +110,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
108#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) 110#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
109#endif 111#endif
110 112
111#define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS)) 113#define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS))
112 114
113/* Hardware flags, page size encoding */ 115/* Hardware flags, page size encoding */
114#if !defined(CONFIG_MMU) 116#if !defined(CONFIG_MMU)
@@ -141,12 +143,14 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
141# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) 143# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
142# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3) 144# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
143# endif 145# endif
146# define _PAGE_WIRED (_PAGE_EXT(_PAGE_EXT_WIRED))
144#else 147#else
145# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) 148# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
146# define _PAGE_SZHUGE (_PAGE_SZ1) 149# define _PAGE_SZHUGE (_PAGE_SZ1)
147# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) 150# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
148# define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) 151# define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1)
149# endif 152# endif
153# define _PAGE_WIRED (0)
150#endif 154#endif
151 155
152/* 156/*
@@ -344,7 +348,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
344#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL) 348#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
345 349
346#ifdef CONFIG_X2TLB 350#ifdef CONFIG_X2TLB
347#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) 351#define pte_write(pte) \
352 ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
348#else 353#else
349#define pte_write(pte) ((pte).pte_low & _PAGE_RW) 354#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
350#endif 355#endif
@@ -358,7 +363,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
358 * individually toggled (and user permissions are entirely decoupled from 363 * individually toggled (and user permissions are entirely decoupled from
359 * kernel permissions), we attempt to couple them a bit more sanely here. 364 * kernel permissions), we attempt to couple them a bit more sanely here.
360 */ 365 */
361PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); 366PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
362PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); 367PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
363PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); 368PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
364#else 369#else
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index 17cdbecc3adc..0ee46776dad6 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -43,11 +43,6 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
43} 43}
44#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 44#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
45 45
46static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
47{
48 pmd_val(*pmdp) = (unsigned long) ptep;
49}
50
51/* 46/*
52 * PGD defines. Top level. 47 * PGD defines. Top level.
53 */ 48 */
@@ -128,8 +123,21 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
128#define _PAGE_DIRTY 0x400 /* software: page accessed in write */ 123#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
129#define _PAGE_ACCESSED 0x800 /* software: page referenced */ 124#define _PAGE_ACCESSED 0x800 /* software: page referenced */
130 125
126/* Wrapper for extended mode pgprot twiddling */
127#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
128
129/*
130 * We can use the sign-extended bits in the PTEL to get 32 bits of
131 * software flags. This works for now because no implementation uses
132 * anything above the PPN field.
133 */
134#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
135
136#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
137 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
138
131/* Mask which drops software flags */ 139/* Mask which drops software flags */
132#define _PAGE_FLAGS_HARDWARE_MASK 0xfffffffffffff3dbLL 140#define _PAGE_FLAGS_HARDWARE_MASK (NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
133 141
134/* 142/*
135 * HugeTLB support 143 * HugeTLB support
@@ -203,12 +211,6 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
203#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE) 211#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
204 212
205/* 213/*
206 * Handling allocation failures during page table setup.
207 */
208extern void __handle_bad_pmd_kernel(pmd_t * pmd);
209#define __handle_bad_pmd(x) __handle_bad_pmd_kernel(x)
210
211/*
212 * PTE level access routines. 214 * PTE level access routines.
213 * 215 *
214 * Note1: 216 * Note1:
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 017e0c1807b2..9605e062840f 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -98,13 +98,34 @@ extern struct sh_cpuinfo cpu_data[];
98 98
99/* Forward decl */ 99/* Forward decl */
100struct seq_operations; 100struct seq_operations;
101struct task_struct;
101 102
102extern struct pt_regs fake_swapper_regs; 103extern struct pt_regs fake_swapper_regs;
103 104
105/* arch/sh/kernel/process.c */
106extern unsigned int xstate_size;
107extern void free_thread_xstate(struct task_struct *);
108extern struct kmem_cache *task_xstate_cachep;
109
110/* arch/sh/mm/alignment.c */
111extern int get_unalign_ctl(struct task_struct *, unsigned long addr);
112extern int set_unalign_ctl(struct task_struct *, unsigned int val);
113
114#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr))
115#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
116
117/* arch/sh/mm/init.c */
118extern unsigned int mem_init_done;
119
104/* arch/sh/kernel/setup.c */ 120/* arch/sh/kernel/setup.c */
105const char *get_cpu_subtype(struct sh_cpuinfo *c); 121const char *get_cpu_subtype(struct sh_cpuinfo *c);
106extern const struct seq_operations cpuinfo_op; 122extern const struct seq_operations cpuinfo_op;
107 123
124/* thread_struct flags */
125#define SH_THREAD_UAC_NOPRINT (1 << 0)
126#define SH_THREAD_UAC_SIGBUS (1 << 1)
127#define SH_THREAD_UAC_MASK (SH_THREAD_UAC_NOPRINT | SH_THREAD_UAC_SIGBUS)
128
108/* processor boot mode configuration */ 129/* processor boot mode configuration */
109#define MODE_PIN0 (1 << 0) 130#define MODE_PIN0 (1 << 0)
110#define MODE_PIN1 (1 << 1) 131#define MODE_PIN1 (1 << 1)
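The new get_unalign_ctl()/set_unalign_ctl() hooks back the generic GET/SET_UNALIGN_CTL prctl() interface with arch/sh/mm/alignment.c, using the SH_THREAD_UAC_* flags above. From userspace the per-task behaviour would be selected roughly as sketched below (illustrative only, assumes a kernel with this patch applied):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Deliver SIGBUS instead of silently fixing up unaligned accesses */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) < 0)
		perror("prctl");

	return 0;
}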
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 9a8714945dc9..572b4eb09493 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -14,6 +14,7 @@
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/types.h> 15#include <asm/types.h>
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
17#include <asm/hw_breakpoint.h>
17 18
18/* 19/*
19 * Default implementation of macro that returns current 20 * Default implementation of macro that returns current
@@ -56,6 +57,7 @@ asmlinkage void __init sh_cpu_init(void);
56#define SR_DSP 0x00001000 57#define SR_DSP 0x00001000
57#define SR_IMASK 0x000000f0 58#define SR_IMASK 0x000000f0
58#define SR_FD 0x00008000 59#define SR_FD 0x00008000
60#define SR_MD 0x40000000
59 61
60/* 62/*
61 * DSP structure and data 63 * DSP structure and data
@@ -89,9 +91,9 @@ struct sh_fpu_soft_struct {
89 unsigned long entry_pc; 91 unsigned long entry_pc;
90}; 92};
91 93
92union sh_fpu_union { 94union thread_xstate {
93 struct sh_fpu_hard_struct hard; 95 struct sh_fpu_hard_struct hardfpu;
94 struct sh_fpu_soft_struct soft; 96 struct sh_fpu_soft_struct softfpu;
95}; 97};
96 98
97struct thread_struct { 99struct thread_struct {
@@ -99,44 +101,36 @@ struct thread_struct {
99 unsigned long sp; 101 unsigned long sp;
100 unsigned long pc; 102 unsigned long pc;
101 103
102 /* Hardware debugging registers */ 104 /* Various thread flags, see SH_THREAD_xxx */
103 unsigned long ubc_pc; 105 unsigned long flags;
104 106
105 /* floating point info */ 107 /* Save middle states of ptrace breakpoints */
106 union sh_fpu_union fpu; 108 struct perf_event *ptrace_bps[HBP_NUM];
107 109
108#ifdef CONFIG_SH_DSP 110#ifdef CONFIG_SH_DSP
109 /* Dsp status information */ 111 /* Dsp status information */
110 struct sh_dsp_struct dsp_status; 112 struct sh_dsp_struct dsp_status;
111#endif 113#endif
112};
113 114
114/* Count of active tasks with UBC settings */ 115 /* Extended processor state */
115extern int ubc_usercnt; 116 union thread_xstate *xstate;
117};
116 118
117#define INIT_THREAD { \ 119#define INIT_THREAD { \
118 .sp = sizeof(init_stack) + (long) &init_stack, \ 120 .sp = sizeof(init_stack) + (long) &init_stack, \
121 .flags = 0, \
119} 122}
120 123
121/*
122 * Do necessary setup to start up a newly executed thread.
123 */
124#define start_thread(_regs, new_pc, new_sp) \
125 set_fs(USER_DS); \
126 _regs->pr = 0; \
127 _regs->sr = SR_FD; /* User mode. */ \
128 _regs->pc = new_pc; \
129 _regs->regs[15] = new_sp
130
131/* Forward declaration, a strange C thing */ 124/* Forward declaration, a strange C thing */
132struct task_struct; 125struct task_struct;
133struct mm_struct; 126
127extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);
134 128
135/* Free all resources held by a thread. */ 129/* Free all resources held by a thread. */
136extern void release_thread(struct task_struct *); 130extern void release_thread(struct task_struct *);
137 131
138/* Prepare to copy thread state - unlazy all lazy status */ 132/* Prepare to copy thread state - unlazy all lazy status */
139#define prepare_to_copy(tsk) do { } while (0) 133void prepare_to_copy(struct task_struct *tsk);
140 134
141/* 135/*
142 * create a kernel thread without removing it from tasklists 136 * create a kernel thread without removing it from tasklists
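With the FPU state moved out of thread_struct into a lazily allocated union thread_xstate, kernel code has to check the pointer before touching it. A hedged sketch (fpscr is a field of sh_fpu_hard_struct; the helper itself is hypothetical):

static unsigned long demo_task_fpscr(struct task_struct *tsk)
{
	if (!tsk->thread.xstate)
		return 0;	/* task has never used the FPU */

	return tsk->thread.xstate->hardfpu.fpscr;
}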
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 5727d31b0ccf..621bc4618c6b 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -87,26 +87,31 @@ struct sh_fpu_hard_struct {
87 /* long status; * software status information */ 87 /* long status; * software status information */
88}; 88};
89 89
90#if 0
91/* Dummy fpu emulator */ 90/* Dummy fpu emulator */
92struct sh_fpu_soft_struct { 91struct sh_fpu_soft_struct {
93 unsigned long long fp_regs[32]; 92 unsigned long fp_regs[64];
94 unsigned int fpscr; 93 unsigned int fpscr;
95 unsigned char lookahead; 94 unsigned char lookahead;
96 unsigned long entry_pc; 95 unsigned long entry_pc;
97}; 96};
98#endif
99 97
100union sh_fpu_union { 98union thread_xstate {
101 struct sh_fpu_hard_struct hard; 99 struct sh_fpu_hard_struct hardfpu;
102 /* 'hard' itself only produces 32 bit alignment, yet we need 100 struct sh_fpu_soft_struct softfpu;
103 to access it using 64 bit load/store as well. */ 101 /*
102 * The structure definitions only produce 32 bit alignment, yet we need
103 * to access them using 64 bit load/store as well.
104 */
104 unsigned long long alignment_dummy; 105 unsigned long long alignment_dummy;
105}; 106};
106 107
107struct thread_struct { 108struct thread_struct {
108 unsigned long sp; 109 unsigned long sp;
109 unsigned long pc; 110 unsigned long pc;
111
112 /* Various thread flags, see SH_THREAD_xxx */
113 unsigned long flags;
114
110 /* This stores the address of the pt_regs built during a context 115 /* This stores the address of the pt_regs built during a context
111 switch, or of the register save area built for a kernel mode 116 switch, or of the register save area built for a kernel mode
112 exception. It is used for backtracing the stack of a sleeping task 117 exception. It is used for backtracing the stack of a sleeping task
@@ -122,7 +127,7 @@ struct thread_struct {
122 /* Hardware debugging registers may come here */ 127 /* Hardware debugging registers may come here */
123 128
124 /* floating point info */ 129 /* floating point info */
125 union sh_fpu_union fpu; 130 union thread_xstate *xstate;
126}; 131};
127 132
128#define INIT_MMAP \ 133#define INIT_MMAP \
@@ -137,7 +142,7 @@ struct thread_struct {
137 .trap_no = 0, \ 142 .trap_no = 0, \
138 .error_code = 0, \ 143 .error_code = 0, \
139 .address = 0, \ 144 .address = 0, \
140 .fpu = { { { 0, } }, } \ 145 .flags = 0, \
141} 146}
142 147
143/* 148/*
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index 1dc12cb44a2d..2168fde25611 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -102,13 +102,15 @@ struct pt_dspregs {
102#define PTRACE_GETDSPREGS 55 /* DSP registers */ 102#define PTRACE_GETDSPREGS 55 /* DSP registers */
103#define PTRACE_SETDSPREGS 56 103#define PTRACE_SETDSPREGS 56
104 104
105#define PT_TEXT_END_ADDR 240 105#define PT_TEXT_END_ADDR 240
106#define PT_TEXT_ADDR 244 /* &(struct user)->start_code */ 106#define PT_TEXT_ADDR 244 /* &(struct user)->start_code */
107#define PT_DATA_ADDR 248 /* &(struct user)->start_data */ 107#define PT_DATA_ADDR 248 /* &(struct user)->start_data */
108#define PT_TEXT_LEN 252 108#define PT_TEXT_LEN 252
109 109
110#ifdef __KERNEL__ 110#ifdef __KERNEL__
111#include <asm/addrspace.h> 111#include <asm/addrspace.h>
112#include <asm/page.h>
113#include <asm/system.h>
112 114
113#define user_mode(regs) (((regs)->sr & 0x40000000)==0) 115#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
114#define instruction_pointer(regs) ((unsigned long)(regs)->pc) 116#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
@@ -121,8 +123,12 @@ extern void show_regs(struct pt_regs *);
121struct task_struct; 123struct task_struct;
122 124
123#define arch_has_single_step() (1) 125#define arch_has_single_step() (1)
124extern void user_enable_single_step(struct task_struct *); 126
125extern void user_disable_single_step(struct task_struct *); 127struct perf_event;
128struct perf_sample_data;
129
130extern void ptrace_triggered(struct perf_event *bp, int nmi,
131 struct perf_sample_data *data, struct pt_regs *regs);
126 132
127#define task_pt_regs(task) \ 133#define task_pt_regs(task) \
128 ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1) 134 ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1)
@@ -131,10 +137,8 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
131{ 137{
132 unsigned long pc = instruction_pointer(regs); 138 unsigned long pc = instruction_pointer(regs);
133 139
134#ifdef P2SEG 140 if (virt_addr_uncached(pc))
135 if (pc >= P2SEG && pc < P3SEG) 141 return CAC_ADDR(pc);
136 pc -= 0x20000000;
137#endif
138 142
139 return pc; 143 return pc;
140} 144}
diff --git a/arch/sh/include/asm/reboot.h b/arch/sh/include/asm/reboot.h
new file mode 100644
index 000000000000..b3da0c63fc3d
--- /dev/null
+++ b/arch/sh/include/asm/reboot.h
@@ -0,0 +1,21 @@
1#ifndef __ASM_SH_REBOOT_H
2#define __ASM_SH_REBOOT_H
3
4#include <linux/kdebug.h>
5
6struct pt_regs;
7
8struct machine_ops {
9 void (*restart)(char *cmd);
10 void (*halt)(void);
11 void (*power_off)(void);
12 void (*shutdown)(void);
13 void (*crash_shutdown)(struct pt_regs *);
14};
15
16extern struct machine_ops machine_ops;
17
18/* arch/sh/kernel/machine_kexec.c */
19void native_machine_crash_shutdown(struct pt_regs *regs);
20
21#endif /* __ASM_SH_REBOOT_H */
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
index 327cc2e4c97b..e38d1d4c7f6f 100644
--- a/arch/sh/include/asm/scatterlist.h
+++ b/arch/sh/include/asm/scatterlist.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_SH_SCATTERLIST_H 1#ifndef __ASM_SH_SCATTERLIST_H
2#define __ASM_SH_SCATTERLIST_H 2#define __ASM_SH_SCATTERLIST_H
3 3
4#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK 4#define ISA_DMA_THRESHOLD phys_addr_mask()
5 5
6#include <asm-generic/scatterlist.h> 6#include <asm-generic/scatterlist.h>
7 7
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index ce3743599b27..4758325bb24a 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -18,7 +18,6 @@
18/* ... */ 18/* ... */
19#define COMMAND_LINE ((char *) (PARAM+0x100)) 19#define COMMAND_LINE ((char *) (PARAM+0x100))
20 20
21int setup_early_printk(char *);
22void sh_mv_setup(void); 21void sh_mv_setup(void);
23 22
24#endif /* __KERNEL__ */ 23#endif /* __KERNEL__ */
diff --git a/arch/sh/include/asm/sh_bios.h b/arch/sh/include/asm/sh_bios.h
index d9c96d7cf6c7..95714c28422b 100644
--- a/arch/sh/include/asm/sh_bios.h
+++ b/arch/sh/include/asm/sh_bios.h
@@ -1,18 +1,27 @@
1#ifndef __ASM_SH_BIOS_H 1#ifndef __ASM_SH_BIOS_H
2#define __ASM_SH_BIOS_H 2#define __ASM_SH_BIOS_H
3 3
4#ifdef CONFIG_SH_STANDARD_BIOS
5
4/* 6/*
5 * Copyright (C) 2000 Greg Banks, Mitch Davis 7 * Copyright (C) 2000 Greg Banks, Mitch Davis
6 * C API to interface to the standard LinuxSH BIOS 8 * C API to interface to the standard LinuxSH BIOS
7 * usually from within the early stages of kernel boot. 9 * usually from within the early stages of kernel boot.
8 */ 10 */
9
10
11extern void sh_bios_console_write(const char *buf, unsigned int len); 11extern void sh_bios_console_write(const char *buf, unsigned int len);
12extern void sh_bios_char_out(char ch);
13extern void sh_bios_gdb_detach(void); 12extern void sh_bios_gdb_detach(void);
14 13
15extern void sh_bios_get_node_addr(unsigned char *node_addr); 14extern void sh_bios_get_node_addr(unsigned char *node_addr);
16extern void sh_bios_shutdown(unsigned int how); 15extern void sh_bios_shutdown(unsigned int how);
17 16
17extern void sh_bios_vbr_init(void);
18extern void sh_bios_vbr_reload(void);
19
20#else
21
22static inline void sh_bios_vbr_init(void) { }
23static inline void sh_bios_vbr_reload(void) { }
24
25#endif /* CONFIG_SH_STANDARD_BIOS */
26
18#endif /* __ASM_SH_BIOS_H */ 27#endif /* __ASM_SH_BIOS_H */
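Because the sh_bios.h declarations are now wrapped in CONFIG_SH_STANDARD_BIOS with empty inline fallbacks, callers can drop their own #ifdefs. A small sketch of early setup code using this header (illustrative only):

#include <asm/sh_bios.h>

void __init demo_early_trap_init(void)
{
	/* Preserve the BIOS VBR hooks before installing the kernel vectors;
	 * compiles away entirely when CONFIG_SH_STANDARD_BIOS=n. */
	sh_bios_vbr_init();
}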
diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h
index acf99700deed..f739061e2ee4 100644
--- a/arch/sh/include/asm/sh_eth.h
+++ b/arch/sh/include/asm/sh_eth.h
@@ -7,6 +7,7 @@ struct sh_eth_plat_data {
7 int phy; 7 int phy;
8 int edmac_endian; 8 int edmac_endian;
9 9
10 unsigned char mac_addr[6];
10 unsigned no_ether_link:1; 11 unsigned no_ether_link:1;
11 unsigned ether_link_active_low:1; 12 unsigned ether_link_active_low:1;
12}; 13};
diff --git a/arch/sh/include/asm/sh_keysc.h b/arch/sh/include/asm/sh_keysc.h
deleted file mode 100644
index 4a65b1e40eab..000000000000
--- a/arch/sh/include/asm/sh_keysc.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_KEYSC_H__
2#define __ASM_KEYSC_H__
3
4#define SH_KEYSC_MAXKEYS 30
5
6struct sh_keysc_info {
7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3 } mode;
8 int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */
9 int delay;
10 int kycr2_delay;
11 int keycodes[SH_KEYSC_MAXKEYS];
12};
13
14#endif /* __ASM_KEYSC_H__ */
diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h
new file mode 100644
index 000000000000..f1b1e6944a5f
--- /dev/null
+++ b/arch/sh/include/asm/siu.h
@@ -0,0 +1,26 @@
1/*
2 * platform header for the SIU ASoC driver
3 *
4 * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef ASM_SIU_H
12#define ASM_SIU_H
13
14#include <asm/dmaengine.h>
15
16struct device;
17
18struct siu_platform {
19 struct device *dma_dev;
20 enum sh_dmae_slave_chan_id dma_slave_tx_a;
21 enum sh_dmae_slave_chan_id dma_slave_rx_a;
22 enum sh_dmae_slave_chan_id dma_slave_tx_b;
23 enum sh_dmae_slave_chan_id dma_slave_rx_b;
24};
25
26#endif /* ASM_SIU_H */
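Board code is expected to hand a struct siu_platform to the SIU ASoC driver as platform data. A sketch under the assumption that SHDMA_SLAVE_SIUA_TX and the related enumerators are provided by <asm/dmaengine.h>, and that dma_controller_device names the board's DMA controller (both names are assumptions):

#include <asm/siu.h>

/* Hypothetical board data; the SHDMA_SLAVE_SIU* enumerator names and the
 * dma_controller_device symbol are assumed for illustration only. */
static struct siu_platform siu_platform_data = {
        .dma_dev        = &dma_controller_device.dev,
        .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
        .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
        .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
        .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX,
};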
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index a28c9f0053fd..bdc0f3b6c56a 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
23 * Your basic SMP spinlocks, allowing only a single CPU anywhere 23 * Your basic SMP spinlocks, allowing only a single CPU anywhere
24 */ 24 */
25 25
26#define __raw_spin_is_locked(x) ((x)->lock <= 0) 26#define arch_spin_is_locked(x) ((x)->lock <= 0)
27#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 27#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
28#define __raw_spin_unlock_wait(x) \ 28#define arch_spin_unlock_wait(x) \
29 do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) 29 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
30 30
31/* 31/*
32 * Simple spin lock operations. There are two variants, one clears IRQ's 32 * Simple spin lock operations. There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
34 * 34 *
35 * We make no fairness assumptions. They have a cost. 35 * We make no fairness assumptions. They have a cost.
36 */ 36 */
37static inline void __raw_spin_lock(raw_spinlock_t *lock) 37static inline void arch_spin_lock(arch_spinlock_t *lock)
38{ 38{
39 unsigned long tmp; 39 unsigned long tmp;
40 unsigned long oldval; 40 unsigned long oldval;
41 41
42 __asm__ __volatile__ ( 42 __asm__ __volatile__ (
43 "1: \n\t" 43 "1: \n\t"
44 "movli.l @%2, %0 ! __raw_spin_lock \n\t" 44 "movli.l @%2, %0 ! arch_spin_lock \n\t"
45 "mov %0, %1 \n\t" 45 "mov %0, %1 \n\t"
46 "mov #0, %0 \n\t" 46 "mov #0, %0 \n\t"
47 "movco.l %0, @%2 \n\t" 47 "movco.l %0, @%2 \n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
54 ); 54 );
55} 55}
56 56
57static inline void __raw_spin_unlock(raw_spinlock_t *lock) 57static inline void arch_spin_unlock(arch_spinlock_t *lock)
58{ 58{
59 unsigned long tmp; 59 unsigned long tmp;
60 60
61 __asm__ __volatile__ ( 61 __asm__ __volatile__ (
62 "mov #1, %0 ! __raw_spin_unlock \n\t" 62 "mov #1, %0 ! arch_spin_unlock \n\t"
63 "mov.l %0, @%1 \n\t" 63 "mov.l %0, @%1 \n\t"
64 : "=&z" (tmp) 64 : "=&z" (tmp)
65 : "r" (&lock->lock) 65 : "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
67 ); 67 );
68} 68}
69 69
70static inline int __raw_spin_trylock(raw_spinlock_t *lock) 70static inline int arch_spin_trylock(arch_spinlock_t *lock)
71{ 71{
72 unsigned long tmp, oldval; 72 unsigned long tmp, oldval;
73 73
74 __asm__ __volatile__ ( 74 __asm__ __volatile__ (
75 "1: \n\t" 75 "1: \n\t"
76 "movli.l @%2, %0 ! __raw_spin_trylock \n\t" 76 "movli.l @%2, %0 ! arch_spin_trylock \n\t"
77 "mov %0, %1 \n\t" 77 "mov %0, %1 \n\t"
78 "mov #0, %0 \n\t" 78 "mov #0, %0 \n\t"
79 "movco.l %0, @%2 \n\t" 79 "movco.l %0, @%2 \n\t"
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
100 * read_can_lock - would read_trylock() succeed? 100 * read_can_lock - would read_trylock() succeed?
101 * @lock: the rwlock in question. 101 * @lock: the rwlock in question.
102 */ 102 */
103#define __raw_read_can_lock(x) ((x)->lock > 0) 103#define arch_read_can_lock(x) ((x)->lock > 0)
104 104
105/** 105/**
106 * write_can_lock - would write_trylock() succeed? 106 * write_can_lock - would write_trylock() succeed?
107 * @lock: the rwlock in question. 107 * @lock: the rwlock in question.
108 */ 108 */
109#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 109#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
110 110
111static inline void __raw_read_lock(raw_rwlock_t *rw) 111static inline void arch_read_lock(arch_rwlock_t *rw)
112{ 112{
113 unsigned long tmp; 113 unsigned long tmp;
114 114
115 __asm__ __volatile__ ( 115 __asm__ __volatile__ (
116 "1: \n\t" 116 "1: \n\t"
117 "movli.l @%1, %0 ! __raw_read_lock \n\t" 117 "movli.l @%1, %0 ! arch_read_lock \n\t"
118 "cmp/pl %0 \n\t" 118 "cmp/pl %0 \n\t"
119 "bf 1b \n\t" 119 "bf 1b \n\t"
120 "add #-1, %0 \n\t" 120 "add #-1, %0 \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
126 ); 126 );
127} 127}
128 128
129static inline void __raw_read_unlock(raw_rwlock_t *rw) 129static inline void arch_read_unlock(arch_rwlock_t *rw)
130{ 130{
131 unsigned long tmp; 131 unsigned long tmp;
132 132
133 __asm__ __volatile__ ( 133 __asm__ __volatile__ (
134 "1: \n\t" 134 "1: \n\t"
135 "movli.l @%1, %0 ! __raw_read_unlock \n\t" 135 "movli.l @%1, %0 ! arch_read_unlock \n\t"
136 "add #1, %0 \n\t" 136 "add #1, %0 \n\t"
137 "movco.l %0, @%1 \n\t" 137 "movco.l %0, @%1 \n\t"
138 "bf 1b \n\t" 138 "bf 1b \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
142 ); 142 );
143} 143}
144 144
145static inline void __raw_write_lock(raw_rwlock_t *rw) 145static inline void arch_write_lock(arch_rwlock_t *rw)
146{ 146{
147 unsigned long tmp; 147 unsigned long tmp;
148 148
149 __asm__ __volatile__ ( 149 __asm__ __volatile__ (
150 "1: \n\t" 150 "1: \n\t"
151 "movli.l @%1, %0 ! __raw_write_lock \n\t" 151 "movli.l @%1, %0 ! arch_write_lock \n\t"
152 "cmp/hs %2, %0 \n\t" 152 "cmp/hs %2, %0 \n\t"
153 "bf 1b \n\t" 153 "bf 1b \n\t"
154 "sub %2, %0 \n\t" 154 "sub %2, %0 \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
160 ); 160 );
161} 161}
162 162
163static inline void __raw_write_unlock(raw_rwlock_t *rw) 163static inline void arch_write_unlock(arch_rwlock_t *rw)
164{ 164{
165 __asm__ __volatile__ ( 165 __asm__ __volatile__ (
166 "mov.l %1, @%0 ! __raw_write_unlock \n\t" 166 "mov.l %1, @%0 ! arch_write_unlock \n\t"
167 : 167 :
168 : "r" (&rw->lock), "r" (RW_LOCK_BIAS) 168 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
169 : "t", "memory" 169 : "t", "memory"
170 ); 170 );
171} 171}
172 172
173static inline int __raw_read_trylock(raw_rwlock_t *rw) 173static inline int arch_read_trylock(arch_rwlock_t *rw)
174{ 174{
175 unsigned long tmp, oldval; 175 unsigned long tmp, oldval;
176 176
177 __asm__ __volatile__ ( 177 __asm__ __volatile__ (
178 "1: \n\t" 178 "1: \n\t"
179 "movli.l @%2, %0 ! __raw_read_trylock \n\t" 179 "movli.l @%2, %0 ! arch_read_trylock \n\t"
180 "mov %0, %1 \n\t" 180 "mov %0, %1 \n\t"
181 "cmp/pl %0 \n\t" 181 "cmp/pl %0 \n\t"
182 "bf 2f \n\t" 182 "bf 2f \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
193 return (oldval > 0); 193 return (oldval > 0);
194} 194}
195 195
196static inline int __raw_write_trylock(raw_rwlock_t *rw) 196static inline int arch_write_trylock(arch_rwlock_t *rw)
197{ 197{
198 unsigned long tmp, oldval; 198 unsigned long tmp, oldval;
199 199
200 __asm__ __volatile__ ( 200 __asm__ __volatile__ (
201 "1: \n\t" 201 "1: \n\t"
202 "movli.l @%2, %0 ! __raw_write_trylock \n\t" 202 "movli.l @%2, %0 ! arch_write_trylock \n\t"
203 "mov %0, %1 \n\t" 203 "mov %0, %1 \n\t"
204 "cmp/hs %3, %0 \n\t" 204 "cmp/hs %3, %0 \n\t"
205 "bf 2f \n\t" 205 "bf 2f \n\t"
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
216 return (oldval > (RW_LOCK_BIAS - 1)); 216 return (oldval > (RW_LOCK_BIAS - 1));
217} 217}
218 218
219#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 219#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
220#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 220#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
221 221
222#define _raw_spin_relax(lock) cpu_relax() 222#define arch_spin_relax(lock) cpu_relax()
223#define _raw_read_relax(lock) cpu_relax() 223#define arch_read_relax(lock) cpu_relax()
224#define _raw_write_relax(lock) cpu_relax() 224#define arch_write_relax(lock) cpu_relax()
225 225
226#endif /* __ASM_SH_SPINLOCK_H */ 226#endif /* __ASM_SH_SPINLOCK_H */
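The rename is mechanical: the SH implementations keep their semantics and only pick up the arch_ prefix together with the arch_spinlock_t/arch_rwlock_t types. A minimal sketch of direct use (in practice only the generic spinlock layer calls these):

static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_critical_section(void)
{
        arch_spin_lock(&example_lock);
        /* ... code protected by the lock ... */
        arch_spin_unlock(&example_lock);
}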
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index b4d244e7b60c..9b7560db06ca 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 1 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int lock; 15 volatile unsigned int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define RW_LOCK_BIAS 0x01000000 18#define RW_LOCK_BIAS 0x01000000
19#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 19#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
20 20
21#endif 21#endif
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
index 5c8ea28ff7a4..64eb41a063e8 100644
--- a/arch/sh/include/asm/suspend.h
+++ b/arch/sh/include/asm/suspend.h
@@ -2,6 +2,7 @@
2#define _ASM_SH_SUSPEND_H 2#define _ASM_SH_SUSPEND_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/notifier.h>
5static inline int arch_prepare_suspend(void) { return 0; } 6static inline int arch_prepare_suspend(void) { return 0; }
6 7
7#include <asm/ptrace.h> 8#include <asm/ptrace.h>
@@ -19,6 +20,69 @@ void sh_mobile_setup_cpuidle(void);
19static inline void sh_mobile_setup_cpuidle(void) {} 20static inline void sh_mobile_setup_cpuidle(void) {}
20#endif 21#endif
21 22
23/* notifier chains for pre/post sleep hooks */
24extern struct atomic_notifier_head sh_mobile_pre_sleep_notifier_list;
25extern struct atomic_notifier_head sh_mobile_post_sleep_notifier_list;
26
27/* priority levels for notifiers */
28#define SH_MOBILE_SLEEP_BOARD 0
29#define SH_MOBILE_SLEEP_CPU 1
30#define SH_MOBILE_PRE(x) (x)
31#define SH_MOBILE_POST(x) (-(x))
32
33/* board code registration function for self-refresh assembly snippets */
34void sh_mobile_register_self_refresh(unsigned long flags,
35 void *pre_start, void *pre_end,
36 void *post_start, void *post_end);
37
38/* register structure for address/data information */
39struct sh_sleep_regs {
40 unsigned long stbcr;
41 unsigned long bar;
42
43 /* MMU */
44 unsigned long pteh;
45 unsigned long ptel;
46 unsigned long ttb;
47 unsigned long tea;
48 unsigned long mmucr;
49 unsigned long ptea;
50 unsigned long pascr;
51 unsigned long irmcr;
52
53 /* Cache */
54 unsigned long ccr;
55 unsigned long ramcr;
56};
57
58/* data area for low-level sleep code */
59struct sh_sleep_data {
60 /* current sleep mode (SUSP_SH_...) */
61 unsigned long mode;
62
63 /* addresses of board specific self-refresh snippets */
64 unsigned long sf_pre;
65 unsigned long sf_post;
66
67 /* address of resume code */
68 unsigned long resume;
69
70 /* register state saved and restored by the assembly code */
71 unsigned long vbr;
72 unsigned long spc;
73 unsigned long sr;
74 unsigned long sp;
75
76 /* structure for keeping register addresses */
77 struct sh_sleep_regs addr;
78
79 /* structure for saving/restoring register state */
80 struct sh_sleep_regs data;
81};
82
83/* a bitmap of supported sleep modes (SUSP_SH..) */
84extern unsigned long sh_mobile_sleep_supported;
85
22#endif 86#endif
23 87
24/* flags passed to assembly suspend code */ 88/* flags passed to assembly suspend code */
@@ -27,5 +91,7 @@ static inline void sh_mobile_setup_cpuidle(void) {}
27#define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */ 91#define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */
28#define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */ 92#define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */
29#define SUSP_SH_SF (1 << 4) /* Enable self-refresh */ 93#define SUSP_SH_SF (1 << 4) /* Enable self-refresh */
94#define SUSP_SH_MMU (1 << 5) /* Save/restore MMU and cache */
95#define SUSP_SH_REGS (1 << 6) /* Save/restore registers */
30 96
31#endif /* _ASM_SH_SUSPEND_H */ 97#endif /* _ASM_SH_SUSPEND_H */
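Board code can hook the new pre/post sleep notifier chains with a plain atomic notifier; the priority macros above decide whether board or CPU hooks run first. A sketch with a hypothetical board callback (the notifier argument is assumed to carry the SUSP_SH_... mode being entered):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/suspend.h>

static int hypothetical_board_pre_sleep(struct notifier_block *nb,
                                        unsigned long mode, void *unused)
{
        /* prepare board-level state before the sleep mode is entered */
        return NOTIFY_DONE;
}

static struct notifier_block board_pre_sleep_nb = {
        .notifier_call  = hypothetical_board_pre_sleep,
        .priority       = SH_MOBILE_PRE(SH_MOBILE_SLEEP_BOARD),
};

static int __init hypothetical_board_pm_init(void)
{
        return atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
                                              &board_pre_sleep_nb);
}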
diff --git a/arch/sh/include/asm/syscall.h b/arch/sh/include/asm/syscall.h
index 6a381429ee9d..aa7777bdc370 100644
--- a/arch/sh/include/asm/syscall.h
+++ b/arch/sh/include/asm/syscall.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_SH_SYSCALL_H 1#ifndef __ASM_SH_SYSCALL_H
2#define __ASM_SH_SYSCALL_H 2#define __ASM_SH_SYSCALL_H
3 3
4extern const unsigned long sys_call_table[];
5
4#ifdef CONFIG_SUPERH32 6#ifdef CONFIG_SUPERH32
5# include "syscall_32.h" 7# include "syscall_32.h"
6#else 8#else
diff --git a/arch/sh/include/asm/syscalls.h b/arch/sh/include/asm/syscalls.h
index c1e2b8deb837..507725af2e54 100644
--- a/arch/sh/include/asm/syscalls.h
+++ b/arch/sh/include/asm/syscalls.h
@@ -3,17 +3,12 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6struct old_utsname;
7
8asmlinkage int old_mmap(unsigned long addr, unsigned long len, 6asmlinkage int old_mmap(unsigned long addr, unsigned long len,
9 unsigned long prot, unsigned long flags, 7 unsigned long prot, unsigned long flags,
10 int fd, unsigned long off); 8 int fd, unsigned long off);
11asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, 9asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
12 unsigned long prot, unsigned long flags, 10 unsigned long prot, unsigned long flags,
13 unsigned long fd, unsigned long pgoff); 11 unsigned long fd, unsigned long pgoff);
14asmlinkage int sys_ipc(uint call, int first, int second,
15 int third, void __user *ptr, long fifth);
16asmlinkage int sys_uname(struct old_utsname __user *name);
17 12
18#ifdef CONFIG_SUPERH32 13#ifdef CONFIG_SUPERH32
19# include "syscalls_32.h" 14# include "syscalls_32.h"
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index b5c5acdc8c0e..0bd7a17d5e1a 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,7 +10,6 @@
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12#include <asm/types.h> 12#include <asm/types.h>
13#include <asm/ptrace.h>
14 13
15#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ 14#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
16 15
@@ -32,7 +31,7 @@
32#define mb() __asm__ __volatile__ ("synco": : :"memory") 31#define mb() __asm__ __volatile__ ("synco": : :"memory")
33#define rmb() mb() 32#define rmb() mb()
34#define wmb() __asm__ __volatile__ ("synco": : :"memory") 33#define wmb() __asm__ __volatile__ ("synco": : :"memory")
35#define ctrl_barrier() __icbi(0xa8000000) 34#define ctrl_barrier() __icbi(PAGE_OFFSET)
36#define read_barrier_depends() do { } while(0) 35#define read_barrier_depends() do { } while(0)
37#else 36#else
38#define mb() __asm__ __volatile__ ("": : :"memory") 37#define mb() __asm__ __volatile__ ("": : :"memory")
@@ -114,6 +113,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
114 (unsigned long)_n_, sizeof(*(ptr))); \ 113 (unsigned long)_n_, sizeof(*(ptr))); \
115 }) 114 })
116 115
116struct pt_regs;
117
117extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); 118extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
118void free_initmem(void); 119void free_initmem(void);
119void free_initrd_mem(unsigned long start, unsigned long end); 120void free_initrd_mem(unsigned long start, unsigned long end);
@@ -137,14 +138,14 @@ extern unsigned int instruction_size(unsigned int insn);
137#endif 138#endif
138 139
139extern unsigned long cached_to_uncached; 140extern unsigned long cached_to_uncached;
141extern unsigned long uncached_size;
140 142
141extern struct dentry *sh_debugfs_root; 143extern struct dentry *sh_debugfs_root;
142 144
143void per_cpu_trap_init(void); 145void per_cpu_trap_init(void);
144void default_idle(void); 146void default_idle(void);
145void cpu_idle_wait(void); 147void cpu_idle_wait(void);
146 148void stop_this_cpu(void *);
147asmlinkage void break_point_trap(void);
148 149
149#ifdef CONFIG_SUPERH32 150#ifdef CONFIG_SUPERH32
150#define BUILD_TRAP_HANDLER(name) \ 151#define BUILD_TRAP_HANDLER(name) \
@@ -171,10 +172,6 @@ BUILD_TRAP_HANDLER(fpu_error);
171BUILD_TRAP_HANDLER(fpu_state_restore); 172BUILD_TRAP_HANDLER(fpu_state_restore);
172BUILD_TRAP_HANDLER(nmi); 173BUILD_TRAP_HANDLER(nmi);
173 174
174#ifdef CONFIG_BUG
175extern void handle_BUG(struct pt_regs *);
176#endif
177
178#define arch_align_stack(x) (x) 175#define arch_align_stack(x) (x)
179 176
180struct mem_access { 177struct mem_access {
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index 607d413f6168..51296b36770e 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -2,6 +2,7 @@
2#define __ASM_SH_SYSTEM_32_H 2#define __ASM_SH_SYSTEM_32_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/mmu.h>
5 6
6#ifdef CONFIG_SH_DSP 7#ifdef CONFIG_SH_DSP
7 8
@@ -144,9 +145,6 @@ do { \
144 __restore_dsp(prev); \ 145 __restore_dsp(prev); \
145} while (0) 146} while (0)
146 147
147#define __uses_jump_to_uncached \
148 noinline __attribute__ ((__section__ (".uncached.text")))
149
150/* 148/*
151 * Jump to uncached area. 149 * Jump to uncached area.
152 * When handling TLB or caches, we need to do it from an uncached area. 150 * When handling TLB or caches, we need to do it from an uncached area.
@@ -216,6 +214,17 @@ static inline reg_size_t register_align(void *val)
216int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, 214int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
217 struct mem_access *ma, int); 215 struct mem_access *ma, int);
218 216
217static inline void trigger_address_error(void)
218{
219 if (__in_29bit_mode())
220 __asm__ __volatile__ (
221 "ldc %0, sr\n\t"
222 "mov.l @%1, %0"
223 :
224 : "r" (0x10000000), "r" (0x80000001)
225 );
226}
227
219asmlinkage void do_address_error(struct pt_regs *regs, 228asmlinkage void do_address_error(struct pt_regs *regs,
220 unsigned long writeaccess, 229 unsigned long writeaccess,
221 unsigned long address); 230 unsigned long address);
@@ -232,4 +241,33 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
232 unsigned long r6, unsigned long r7, 241 unsigned long r6, unsigned long r7,
233 struct pt_regs __regs); 242 struct pt_regs __regs);
234 243
244static inline void set_bl_bit(void)
245{
246 unsigned long __dummy0, __dummy1;
247
248 __asm__ __volatile__ (
249 "stc sr, %0\n\t"
250 "or %2, %0\n\t"
251 "and %3, %0\n\t"
252 "ldc %0, sr\n\t"
253 : "=&r" (__dummy0), "=r" (__dummy1)
254 : "r" (0x10000000), "r" (0xffffff0f)
255 : "memory"
256 );
257}
258
259static inline void clear_bl_bit(void)
260{
261 unsigned long __dummy0, __dummy1;
262
263 __asm__ __volatile__ (
264 "stc sr, %0\n\t"
265 "and %2, %0\n\t"
266 "ldc %0, sr\n\t"
267 : "=&r" (__dummy0), "=r" (__dummy1)
268 : "1" (~0x10000000)
269 : "memory"
270 );
271}
272
235#endif /* __ASM_SH_SYSTEM_32_H */ 273#endif /* __ASM_SH_SYSTEM_32_H */
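set_bl_bit()/clear_bl_bit() simply set and clear SR.BL, which masks interrupts and most exceptions. A minimal, illustrative way of bracketing a region that must not be re-entered through exceptions:

static void example_blocked_region(void)
{
        set_bl_bit();           /* SR.BL = 1: block exceptions/interrupts */
        /* ... work that must not be interrupted ... */
        clear_bl_bit();         /* SR.BL = 0: unblock again */
}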
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 8e4a03e7966c..36338646dfc8 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -12,11 +12,13 @@
12 * License. See the file "COPYING" in the main directory of this archive 12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details. 13 * for more details.
14 */ 14 */
15#include <cpu/registers.h>
15#include <asm/processor.h> 16#include <asm/processor.h>
16 17
17/* 18/*
18 * switch_to() should switch tasks to task nr n, first 19 * switch_to() should switch tasks to task nr n, first
19 */ 20 */
21struct thread_struct;
20struct task_struct *sh64_switch_to(struct task_struct *prev, 22struct task_struct *sh64_switch_to(struct task_struct *prev,
21 struct thread_struct *prev_thread, 23 struct thread_struct *prev_thread,
22 struct task_struct *next, 24 struct task_struct *next,
@@ -32,8 +34,6 @@ do { \
32 &next->thread); \ 34 &next->thread); \
33} while (0) 35} while (0)
34 36
35#define __uses_jump_to_uncached
36
37#define jump_to_uncached() do { } while (0) 37#define jump_to_uncached() do { } while (0)
38#define back_to_cached() do { } while (0) 38#define back_to_cached() do { } while (0)
39 39
@@ -47,4 +47,36 @@ static inline reg_size_t register_align(void *val)
47 return (unsigned long long)(signed long long)(signed long)val; 47 return (unsigned long long)(signed long long)(signed long)val;
48} 48}
49 49
50extern void phys_stext(void);
51
52static inline void trigger_address_error(void)
53{
54 phys_stext();
55}
56
57#define SR_BL_LL 0x0000000010000000LL
58
59static inline void set_bl_bit(void)
60{
61 unsigned long long __dummy0, __dummy1 = SR_BL_LL;
62
63 __asm__ __volatile__("getcon " __SR ", %0\n\t"
64 "or %0, %1, %0\n\t"
65 "putcon %0, " __SR "\n\t"
66 : "=&r" (__dummy0)
67 : "r" (__dummy1));
68
69}
70
71static inline void clear_bl_bit(void)
72{
73 unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
74
75 __asm__ __volatile__("getcon " __SR ", %0\n\t"
76 "and %0, %1, %0\n\t"
77 "putcon %0, " __SR "\n\t"
78 : "=&r" (__dummy0)
79 : "r" (__dummy1));
80}
81
50#endif /* __ASM_SH_SYSTEM_64_H */ 82#endif /* __ASM_SH_SYSTEM_64_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index bdeb9d46d17d..55a36fef6875 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -19,6 +19,7 @@ struct thread_info {
19 struct task_struct *task; /* main task structure */ 19 struct task_struct *task; /* main task structure */
20 struct exec_domain *exec_domain; /* execution domain */ 20 struct exec_domain *exec_domain; /* execution domain */
21 unsigned long flags; /* low level flags */ 21 unsigned long flags; /* low level flags */
22 __u32 status; /* thread synchronous flags */
22 __u32 cpu; 23 __u32 cpu;
23 int preempt_count; /* 0 => preemptable, <0 => BUG */ 24 int preempt_count; /* 0 => preemptable, <0 => BUG */
24 mm_segment_t addr_limit; /* thread address space */ 25 mm_segment_t addr_limit; /* thread address space */
@@ -50,6 +51,7 @@ struct thread_info {
50 .task = &tsk, \ 51 .task = &tsk, \
51 .exec_domain = &default_exec_domain, \ 52 .exec_domain = &default_exec_domain, \
52 .flags = 0, \ 53 .flags = 0, \
54 .status = 0, \
53 .cpu = 0, \ 55 .cpu = 0, \
54 .preempt_count = INIT_PREEMPT_COUNT, \ 56 .preempt_count = INIT_PREEMPT_COUNT, \
55 .addr_limit = KERNEL_DS, \ 57 .addr_limit = KERNEL_DS, \
@@ -91,14 +93,16 @@ static inline struct thread_info *current_thread_info(void)
91 93
92#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) 94#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
93 95
94#else /* THREAD_SHIFT < PAGE_SHIFT */ 96#endif
95
96#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
97 97
98extern struct thread_info *alloc_thread_info(struct task_struct *tsk); 98extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
99extern void free_thread_info(struct thread_info *ti); 99extern void free_thread_info(struct thread_info *ti);
100extern void arch_task_cache_init(void);
101#define arch_task_cache_init arch_task_cache_init
102extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
103extern void init_thread_xstate(void);
100 104
101#endif /* THREAD_SHIFT < PAGE_SHIFT */ 105#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
102 106
103#endif /* __ASSEMBLY__ */ 107#endif /* __ASSEMBLY__ */
104 108
@@ -111,13 +115,11 @@ extern void free_thread_info(struct thread_info *ti);
111#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 115#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
112#define TIF_SIGPENDING 1 /* signal pending */ 116#define TIF_SIGPENDING 1 /* signal pending */
113#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 117#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
114#define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */
115#define TIF_SINGLESTEP 4 /* singlestepping active */ 118#define TIF_SINGLESTEP 4 /* singlestepping active */
116#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ 119#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
117#define TIF_SECCOMP 6 /* secure computing */ 120#define TIF_SECCOMP 6 /* secure computing */
118#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ 121#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
119#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ 122#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
120#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
121#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 123#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
122#define TIF_MEMDIE 18 124#define TIF_MEMDIE 18
123#define TIF_FREEZE 19 /* Freezing for suspend */ 125#define TIF_FREEZE 19 /* Freezing for suspend */
@@ -125,13 +127,11 @@ extern void free_thread_info(struct thread_info *ti);
125#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 127#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
126#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 128#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
127#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 129#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
128#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
129#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 130#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
130#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 131#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
131#define _TIF_SECCOMP (1 << TIF_SECCOMP) 132#define _TIF_SECCOMP (1 << TIF_SECCOMP)
132#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 133#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
133#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 134#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
134#define _TIF_USEDFPU (1 << TIF_USEDFPU)
135#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 135#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
136#define _TIF_FREEZE (1 << TIF_FREEZE) 136#define _TIF_FREEZE (1 << TIF_FREEZE)
137 137
@@ -149,13 +149,33 @@ extern void free_thread_info(struct thread_info *ti);
149/* work to do on any return to u-space */ 149/* work to do on any return to u-space */
150#define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ 150#define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \
151 _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ 151 _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \
152 _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ 152 _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \
153 _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT) 153 _TIF_SYSCALL_TRACEPOINT)
154 154
155/* work to do on interrupt/exception return */ 155/* work to do on interrupt/exception return */
156#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ 156#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
157 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) 157 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
158 158
159/*
160 * Thread-synchronous status.
161 *
162 * This is different from the flags in that nobody else
163 * ever touches our thread-synchronous status, so we don't
164 * have to worry about atomic accesses.
165 */
166#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
167#define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */
168
169#ifndef __ASSEMBLY__
170#define HAVE_SET_RESTORE_SIGMASK 1
171static inline void set_restore_sigmask(void)
172{
173 struct thread_info *ti = current_thread_info();
174 ti->status |= TS_RESTORE_SIGMASK;
175 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
176}
177#endif /* !__ASSEMBLY__ */
178
159#endif /* __KERNEL__ */ 179#endif /* __KERNEL__ */
160 180
161#endif /* __ASM_SH_THREAD_INFO_H */ 181#endif /* __ASM_SH_THREAD_INFO_H */
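Moving RESTORE_SIGMASK and USEDFPU into the new thread-synchronous status word means consumers test and clear plain bits on current_thread_info()->status instead of atomic TIF flags. A sketch of the consumer side (the function name is hypothetical):

static void hypothetical_restore_saved_sigmask(void)
{
        struct thread_info *ti = current_thread_info();

        if (ti->status & TS_RESTORE_SIGMASK) {
                ti->status &= ~TS_RESTORE_SIGMASK;
                /* restore the previously saved blocked-signal mask here */
        }
}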
diff --git a/arch/sh/include/asm/timex.h b/arch/sh/include/asm/timex.h
index b556d49e5f2b..18bf06d9c764 100644
--- a/arch/sh/include/asm/timex.h
+++ b/arch/sh/include/asm/timex.h
@@ -6,7 +6,17 @@
6#ifndef __ASM_SH_TIMEX_H 6#ifndef __ASM_SH_TIMEX_H
7#define __ASM_SH_TIMEX_H 7#define __ASM_SH_TIMEX_H
8 8
9/*
10 * Only parts using the legacy CPG code for their clock framework
11 * implementation need to define their own Pclk value. If provided, this
12 * can be used for accurately setting CLOCK_TICK_RATE, otherwise we
13 * simply fall back on the i8253 PIT value.
14 */
15#ifdef CONFIG_SH_PCLK_FREQ
9#define CLOCK_TICK_RATE (CONFIG_SH_PCLK_FREQ / 4) /* Underlying HZ */ 16#define CLOCK_TICK_RATE (CONFIG_SH_PCLK_FREQ / 4) /* Underlying HZ */
17#else
18#define CLOCK_TICK_RATE 1193180
19#endif
10 20
11#include <asm-generic/timex.h> 21#include <asm-generic/timex.h>
12 22
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index da8fe7ab8728..75abb38dffd5 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -11,6 +11,7 @@
11#ifdef CONFIG_MMU 11#ifdef CONFIG_MMU
12#include <asm/pgalloc.h> 12#include <asm/pgalloc.h>
13#include <asm/tlbflush.h> 13#include <asm/tlbflush.h>
14#include <asm/mmu_context.h>
14 15
15/* 16/*
16 * TLB handling. This allows us to remove pages from the page 17 * TLB handling. This allows us to remove pages from the page
@@ -97,6 +98,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
97 98
98#define tlb_migrate_finish(mm) do { } while (0) 99#define tlb_migrate_finish(mm) do { } while (0)
99 100
101#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
102extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
103extern void tlb_unwire_entry(void);
104#else
105static inline void tlb_wire_entry(struct vm_area_struct *vma,
106 unsigned long addr, pte_t pte)
107{
108 BUG();
109}
110
111static inline void tlb_unwire_entry(void)
112{
113 BUG();
114}
115#endif
116
100#else /* CONFIG_MMU */ 117#else /* CONFIG_MMU */
101 118
102#define tlb_start_vma(tlb, vma) do { } while (0) 119#define tlb_start_vma(tlb, vma) do { } while (0)
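On CPUs that provide the feature (SH-4 and SH-5 per the #if above; the stubs BUG() elsewhere), a wired entry pins one translation so a short sequence can touch the page without risking a TLB miss. An illustrative sketch:

static void example_wired_access(struct vm_area_struct *vma,
                                 unsigned long addr, pte_t pte)
{
        tlb_wire_entry(vma, addr, pte); /* pin the translation */
        /* ... access memory at 'addr' without taking a TLB miss ... */
        tlb_unwire_entry();             /* release the wired slot */
}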
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index 65e7bd2f2240..88e734069fa6 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -35,11 +35,19 @@
35 35
36#define pcibus_to_node(bus) ((void)(bus), -1) 36#define pcibus_to_node(bus) ((void)(bus), -1)
37#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 37#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
38 CPU_MASK_ALL_PTR : \ 38 cpu_all_mask : \
39 cpumask_of_node(pcibus_to_node(bus))) 39 cpumask_of_node(pcibus_to_node(bus)))
40 40
41#endif 41#endif
42 42
43#define mc_capable() (1)
44
45const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
46
47extern cpumask_t cpu_core_map[NR_CPUS];
48
49#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
50
43#include <asm-generic/topology.h> 51#include <asm-generic/topology.h>
44 52
45#endif /* _ASM_SH_TOPOLOGY_H */ 53#endif /* _ASM_SH_TOPOLOGY_H */
diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h
deleted file mode 100644
index 4ca4b7717371..000000000000
--- a/arch/sh/include/asm/ubc.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * include/asm-sh/ubc.h
3 *
4 * Copyright (C) 1999 Niibe Yutaka
5 * Copyright (C) 2002, 2003 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#ifndef __ASM_SH_UBC_H
12#define __ASM_SH_UBC_H
13#ifdef __KERNEL__
14
15#include <cpu/ubc.h>
16
17/* User Break Controller */
18#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709)
19#define UBC_TYPE_SH7729 (current_cpu_data.type == CPU_SH7729)
20#else
21#define UBC_TYPE_SH7729 0
22#endif
23
24#define BAMR_ASID (1 << 2)
25#define BAMR_NONE 0
26#define BAMR_10 0x1
27#define BAMR_12 0x2
28#define BAMR_ALL 0x3
29#define BAMR_16 0x8
30#define BAMR_20 0x9
31
32#define BBR_INST (1 << 4)
33#define BBR_DATA (2 << 4)
34#define BBR_READ (1 << 2)
35#define BBR_WRITE (2 << 2)
36#define BBR_BYTE 0x1
37#define BBR_HALF 0x2
38#define BBR_LONG 0x3
39#define BBR_QUAD (1 << 6) /* SH7750 */
40#define BBR_CPU (1 << 6) /* SH7709A,SH7729 */
41#define BBR_DMA (2 << 6) /* SH7709A,SH7729 */
42
43#define BRCR_CMFA (1 << 15)
44#define BRCR_CMFB (1 << 14)
45
46#if defined CONFIG_CPU_SH2A
47#define BRCR_CMFCA (1 << 15)
48#define BRCR_CMFCB (1 << 14)
49#define BRCR_CMFDA (1 << 13)
50#define BRCR_CMFDB (1 << 12)
51#define BRCR_PCBB (1 << 6) /* 1: after execution */
52#define BRCR_PCBA (1 << 5) /* 1: after execution */
53#define BRCR_PCTE 0
54#else
55#define BRCR_PCTE (1 << 11)
56#define BRCR_PCBA (1 << 10) /* 1: after execution */
57#define BRCR_DBEB (1 << 7)
58#define BRCR_PCBB (1 << 6)
59#define BRCR_SEQ (1 << 3)
60#define BRCR_UBDE (1 << 0)
61#endif
62
63#ifndef __ASSEMBLY__
64/* arch/sh/kernel/cpu/ubc.S */
65extern void ubc_sleep(void);
66
67#ifdef CONFIG_UBC_WAKEUP
68extern void ubc_wakeup(void);
69#else
70#define ubc_wakeup() do { } while (0)
71#endif
72#endif
73
74#endif /* __KERNEL__ */
75#endif /* __ASM_SH_UBC_H */
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
new file mode 100644
index 000000000000..e3419f96626a
--- /dev/null
+++ b/arch/sh/include/asm/uncached.h
@@ -0,0 +1,18 @@
1#ifndef __ASM_SH_UNCACHED_H
2#define __ASM_SH_UNCACHED_H
3
4#include <linux/bug.h>
5
6#ifdef CONFIG_UNCACHED_MAPPING
7extern unsigned long uncached_start, uncached_end;
8
9extern int virt_addr_uncached(unsigned long kaddr);
10extern void uncached_init(void);
11extern void uncached_resize(unsigned long size);
12#else
13#define virt_addr_uncached(kaddr) (0)
14#define uncached_init() do { } while (0)
15#define uncached_resize(size) BUG()
16#endif
17
18#endif /* __ASM_SH_UNCACHED_H */
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index f3fd1b9eb6b1..0e7f0fc8f086 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -350,12 +350,15 @@
350 350
351#ifdef __KERNEL__ 351#ifdef __KERNEL__
352 352
353#define __IGNORE_recvmmsg
354
353#define __ARCH_WANT_IPC_PARSE_VERSION 355#define __ARCH_WANT_IPC_PARSE_VERSION
354#define __ARCH_WANT_OLD_READDIR 356#define __ARCH_WANT_OLD_READDIR
355#define __ARCH_WANT_OLD_STAT 357#define __ARCH_WANT_OLD_STAT
356#define __ARCH_WANT_STAT64 358#define __ARCH_WANT_STAT64
357#define __ARCH_WANT_SYS_ALARM 359#define __ARCH_WANT_SYS_ALARM
358#define __ARCH_WANT_SYS_GETHOSTNAME 360#define __ARCH_WANT_SYS_GETHOSTNAME
361#define __ARCH_WANT_SYS_IPC
359#define __ARCH_WANT_SYS_PAUSE 362#define __ARCH_WANT_SYS_PAUSE
360#define __ARCH_WANT_SYS_SGETMASK 363#define __ARCH_WANT_SYS_SGETMASK
361#define __ARCH_WANT_SYS_SIGNAL 364#define __ARCH_WANT_SYS_SIGNAL
@@ -368,6 +371,7 @@
368#define __ARCH_WANT_SYS_LLSEEK 371#define __ARCH_WANT_SYS_LLSEEK
369#define __ARCH_WANT_SYS_NICE 372#define __ARCH_WANT_SYS_NICE
370#define __ARCH_WANT_SYS_OLD_GETRLIMIT 373#define __ARCH_WANT_SYS_OLD_GETRLIMIT
374#define __ARCH_WANT_SYS_OLD_UNAME
371#define __ARCH_WANT_SYS_OLDUMOUNT 375#define __ARCH_WANT_SYS_OLDUMOUNT
372#define __ARCH_WANT_SYS_SIGPENDING 376#define __ARCH_WANT_SYS_SIGPENDING
373#define __ARCH_WANT_SYS_SIGPROCMASK 377#define __ARCH_WANT_SYS_SIGPROCMASK
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 343ce8f073ea..0580c33a1e04 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -385,10 +385,12 @@
385#define __NR_pwritev 362 385#define __NR_pwritev 362
386#define __NR_rt_tgsigqueueinfo 363 386#define __NR_rt_tgsigqueueinfo 363
387#define __NR_perf_event_open 364 387#define __NR_perf_event_open 364
388#define __NR_recvmmsg 365
389#define __NR_accept4 366
388 390
389#ifdef __KERNEL__ 391#ifdef __KERNEL__
390 392
391#define NR_syscalls 365 393#define NR_syscalls 367
392 394
393#define __ARCH_WANT_IPC_PARSE_VERSION 395#define __ARCH_WANT_IPC_PARSE_VERSION
394#define __ARCH_WANT_OLD_READDIR 396#define __ARCH_WANT_OLD_READDIR
@@ -396,6 +398,7 @@
396#define __ARCH_WANT_STAT64 398#define __ARCH_WANT_STAT64
397#define __ARCH_WANT_SYS_ALARM 399#define __ARCH_WANT_SYS_ALARM
398#define __ARCH_WANT_SYS_GETHOSTNAME 400#define __ARCH_WANT_SYS_GETHOSTNAME
401#define __ARCH_WANT_SYS_IPC
399#define __ARCH_WANT_SYS_PAUSE 402#define __ARCH_WANT_SYS_PAUSE
400#define __ARCH_WANT_SYS_SGETMASK 403#define __ARCH_WANT_SYS_SGETMASK
401#define __ARCH_WANT_SYS_SIGNAL 404#define __ARCH_WANT_SYS_SIGNAL
@@ -408,6 +411,7 @@
408#define __ARCH_WANT_SYS_LLSEEK 411#define __ARCH_WANT_SYS_LLSEEK
409#define __ARCH_WANT_SYS_NICE 412#define __ARCH_WANT_SYS_NICE
410#define __ARCH_WANT_SYS_OLD_GETRLIMIT 413#define __ARCH_WANT_SYS_OLD_GETRLIMIT
414#define __ARCH_WANT_SYS_OLD_UNAME
411#define __ARCH_WANT_SYS_OLDUMOUNT 415#define __ARCH_WANT_SYS_OLDUMOUNT
412#define __ARCH_WANT_SYS_SIGPENDING 416#define __ARCH_WANT_SYS_SIGPENDING
413#define __ARCH_WANT_SYS_SIGPROCMASK 417#define __ARCH_WANT_SYS_SIGPROCMASK
diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h
index 244ec4ad9a79..d58ad493b3a6 100644
--- a/arch/sh/include/asm/vmlinux.lds.h
+++ b/arch/sh/include/asm/vmlinux.lds.h
@@ -14,4 +14,12 @@
14#define DWARF_EH_FRAME 14#define DWARF_EH_FRAME
15#endif 15#endif
16 16
17#ifdef CONFIG_SUPERH64
18#define EXTRA_TEXT \
19 *(.text64) \
20 *(.text..SHmedia32)
21#else
22#define EXTRA_TEXT
23#endif
24
17#endif /* __ASM_SH_VMLINUX_LDS_H */ 25#endif /* __ASM_SH_VMLINUX_LDS_H */
diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h
index 2fe7cee9e43a..85a7aca7fb8f 100644
--- a/arch/sh/include/asm/watchdog.h
+++ b/arch/sh/include/asm/watchdog.h
@@ -2,6 +2,8 @@
2 * include/asm-sh/watchdog.h 2 * include/asm-sh/watchdog.h
3 * 3 *
4 * Copyright (C) 2002, 2003 Paul Mundt 4 * Copyright (C) 2002, 2003 Paul Mundt
5 * Copyright (C) 2009 Siemens AG
6 * Copyright (C) 2009 Valentin Sitdikov
5 * 7 *
6 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -61,13 +63,68 @@
61#define WTCSR_CKS_2048 0x06 63#define WTCSR_CKS_2048 0x06
62#define WTCSR_CKS_4096 0x07 64#define WTCSR_CKS_4096 0x07
63 65
66#if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780)
67/**
68 * sh_wdt_read_cnt - Read from Counter
69 * Reads back the WTCNT value.
70 */
71static inline __u32 sh_wdt_read_cnt(void)
72{
73 return __raw_readl(WTCNT_R);
74}
75
76/**
77 * sh_wdt_write_cnt - Write to Counter
78 * @val: Value to write
79 *
80 * Writes the given value @val to the lower byte of the timer counter.
81 * The upper byte is set manually on each write.
82 */
83static inline void sh_wdt_write_cnt(__u32 val)
84{
85 __raw_writel((WTCNT_HIGH << 24) | (__u32)val, WTCNT);
86}
87
88/**
89 * sh_wdt_write_bst - Write to the WTBST register
90 * @val: Value to write
91 *
92 * Writes the given value @val to the lower bytes of the WTBST register.
93 * The upper byte is set manually on each write.
94 */
95static inline void sh_wdt_write_bst(__u32 val)
96{
97 __raw_writel((WTBST_HIGH << 24) | (__u32)val, WTBST);
98}
99/**
100 * sh_wdt_read_csr - Read from Control/Status Register
101 *
102 * Reads back the WTCSR value.
103 */
104static inline __u32 sh_wdt_read_csr(void)
105{
106 return __raw_readl(WTCSR_R);
107}
108
109/**
110 * sh_wdt_write_csr - Write to Control/Status Register
111 * @val: Value to write
112 *
113 * Writes the given value @val to the lower byte of the control/status
114 * register. The upper byte is set manually on each write.
115 */
116static inline void sh_wdt_write_csr(__u32 val)
117{
118 __raw_writel((WTCSR_HIGH << 24) | (__u32)val, WTCSR);
119}
120#else
64/** 121/**
65 * sh_wdt_read_cnt - Read from Counter 122 * sh_wdt_read_cnt - Read from Counter
66 * Reads back the WTCNT value. 123 * Reads back the WTCNT value.
67 */ 124 */
68static inline __u8 sh_wdt_read_cnt(void) 125static inline __u8 sh_wdt_read_cnt(void)
69{ 126{
70 return ctrl_inb(WTCNT_R); 127 return __raw_readb(WTCNT_R);
71} 128}
72 129
73/** 130/**
@@ -79,7 +136,7 @@ static inline __u8 sh_wdt_read_cnt(void)
79 */ 136 */
80static inline void sh_wdt_write_cnt(__u8 val) 137static inline void sh_wdt_write_cnt(__u8 val)
81{ 138{
82 ctrl_outw((WTCNT_HIGH << 8) | (__u16)val, WTCNT); 139 __raw_writew((WTCNT_HIGH << 8) | (__u16)val, WTCNT);
83} 140}
84 141
85/** 142/**
@@ -89,7 +146,7 @@ static inline void sh_wdt_write_cnt(__u8 val)
89 */ 146 */
90static inline __u8 sh_wdt_read_csr(void) 147static inline __u8 sh_wdt_read_csr(void)
91{ 148{
92 return ctrl_inb(WTCSR_R); 149 return __raw_readb(WTCSR_R);
93} 150}
94 151
95/** 152/**
@@ -101,8 +158,8 @@ static inline __u8 sh_wdt_read_csr(void)
101 */ 158 */
102static inline void sh_wdt_write_csr(__u8 val) 159static inline void sh_wdt_write_csr(__u8 val)
103{ 160{
104 ctrl_outw((WTCSR_HIGH << 8) | (__u16)val, WTCSR); 161 __raw_writew((WTCSR_HIGH << 8) | (__u16)val, WTCSR);
105} 162}
106 163#endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */
107#endif /* __KERNEL__ */ 164#endif /* __KERNEL__ */
108#endif /* __ASM_SH_WATCHDOG_H */ 165#endif /* __ASM_SH_WATCHDOG_H */
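Whichever register width applies, the accessors hide the WTCNT_HIGH/WTCSR_HIGH write keys, so a watchdog "ping" reduces to rewriting the counter. A minimal sketch:

static void example_wdt_ping(void)
{
        /* restart the count; the accessor supplies the write key */
        sh_wdt_write_cnt(0);
}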