author		Linus Torvalds <torvalds@woody.osdl.org>	2006-12-12 11:14:46 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-12 11:14:46 -0500
commit		d224a93d91610fc641fbc5b234b32fcb84045a30 (patch)
tree		f908bcf0c0c1c73dabfd00a134895cfb33aa9a5d /include
parent		b57bd06655a028aba7b92e1c19c2093e7fcfb341 (diff)
parent		e9cfc147df99790a7d260e9d20b865fa31ec56da (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6: (29 commits)
  sh: Fixup SH-2 BUG() trap handling.
  sh: Use early_param() for earlyprintk parsing.
  sh: Fix .empty_zero_page alignment for PAGE_SIZE > 4096.
  sh: Fixup .data.page_aligned.
  sh: Hook up SH7722 scif ipr interrupts.
  sh: Fixup sh_bios() trap handling.
  sh: SH-MobileR SH7722 CPU support.
  sh: Fixup dma_cache_sync() callers.
  sh: Convert remaining remap_area_pages() users to ioremap_page_range().
  sh: Fixup kernel_execve() for syscall cleanups.
  sh: Fix get_wchan().
  sh: BUG() handling through trapa vector.
  rtc: rtc-sh: alarm support.
  rtc: rtc-sh: fix rtc for out-by-one for the month.
  sh: Kill off unused SE7619 I/O ops.
  serial: sh-sci: Shut up various sci_rxd_in() gcc4 warnings.
  sh: Split out atomic ops logically.
  sh: Fix Solution Engine 7619 build.
  sh: Trivial build fixes for SH-2 support.
  sh: IPR IRQ updates for SH7619/SH7206.
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/asm-sh/atomic-irq.h	 71
-rw-r--r--	include/asm-sh/atomic-llsc.h	107
-rw-r--r--	include/asm-sh/atomic.h		153
-rw-r--r--	include/asm-sh/bug.h		 53
-rw-r--r--	include/asm-sh/bugs.h		 12
-rw-r--r--	include/asm-sh/checksum.h	 69
-rw-r--r--	include/asm-sh/cpu-sh4/cache.h	  2
-rw-r--r--	include/asm-sh/cpu-sh4/freq.h	  2
-rw-r--r--	include/asm-sh/dma-mapping.h	 10
-rw-r--r--	include/asm-sh/irq.h		  5
-rw-r--r--	include/asm-sh/pgtable.h	 47
-rw-r--r--	include/asm-sh/processor.h	  8
-rw-r--r--	include/asm-sh/push-switch.h	  3
13 files changed, 329 insertions(+), 213 deletions(-)
diff --git a/include/asm-sh/atomic-irq.h b/include/asm-sh/atomic-irq.h
new file mode 100644
index 000000000000..74f7943cff6f
--- /dev/null
+++ b/include/asm-sh/atomic-irq.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH_ATOMIC_IRQ_H
+#define __ASM_SH_ATOMIC_IRQ_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v += i;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v -= i;
+	local_irq_restore(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long temp, flags;
+
+	local_irq_save(flags);
+	temp = *(long *)v;
+	temp += i;
+	*(long *)v = temp;
+	local_irq_restore(flags);
+
+	return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long temp, flags;
+
+	local_irq_save(flags);
+	temp = *(long *)v;
+	temp -= i;
+	*(long *)v = temp;
+	local_irq_restore(flags);
+
+	return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v &= ~mask;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v |= mask;
+	local_irq_restore(flags);
+}
+
+#endif /* __ASM_SH_ATOMIC_IRQ_H */
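The IRQ variant targets CPUs without load-locked/store-conditional support: on a uniprocessor, masking interrupts around a plain read-modify-write is sufficient for atomicity. A minimal usage sketch (the counter and callers are hypothetical, not part of the patch):

	/* Hypothetical caller: an IRQ handler and a thread share a counter.
	 * local_irq_save() inside atomic_add() ensures the += cannot be
	 * torn by an interrupt arriving mid-update on a UP system. */
	static atomic_t pending_events = ATOMIC_INIT(0);

	static void event_producer_irq(void)
	{
		atomic_add(1, &pending_events);
	}

	static int event_consumer(void)
	{
		/* returns the value after the decrement */
		return atomic_sub_return(1, &pending_events);
	}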
diff --git a/include/asm-sh/atomic-llsc.h b/include/asm-sh/atomic-llsc.h
new file mode 100644
index 000000000000..4b00b78e3f4f
--- /dev/null
+++ b/include/asm-sh/atomic-llsc.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_add	\n"
+"	add	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
+"	bf	1b				\n"
+	: "=&z" (tmp)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_sub	\n"
+"	sub	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
+"	bf	1b				\n"
+	: "=&z" (tmp)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_add_return	\n"
+"	add	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
+"	bf	1b				\n"
+"	synco					\n"
+	: "=&z" (temp)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+
+	return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_sub_return	\n"
+"	sub	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
+"	bf	1b				\n"
+"	synco					\n"
+	: "=&z" (temp)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+
+	return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
+"	and	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
+"	bf	1b				\n"
+	: "=&z" (tmp)
+	: "r" (~mask), "r" (&v->counter)
+	: "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_set_mask	\n"
+"	or	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
+"	bf	1b				\n"
+	: "=&z" (tmp)
+	: "r" (mask), "r" (&v->counter)
+	: "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
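movli.l/movco.l form a load-locked/store-conditional pair: movco.l only succeeds (setting the T bit) if nothing disturbed the reservation taken by movli.l, and "bf 1b" retries otherwise. An illustrative C analogue of the same retry loop, using GCC's __atomic builtins rather than the header's inline assembly (names are ours, not from the patch):

	/* Sketch: a compare-and-swap loop equivalent to the movli.l/movco.l
	 * sequence; a failed exchange plays the role of movco.l clearing
	 * the T bit and bf branching back to the 1: label. */
	static inline int atomic_add_return_sketch(int i, int *counter)
	{
		int old, new;

		do {
			old = __atomic_load_n(counter, __ATOMIC_RELAXED);
			new = old + i;
		} while (!__atomic_compare_exchange_n(counter, &old, new, 0,
						      __ATOMIC_SEQ_CST,
						      __ATOMIC_RELAXED));
		return new;
	}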
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 28305c3cbddf..e12570b9339d 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
 #ifdef CONFIG_CPU_SH4A
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_add	\n"
-"	add	%1, %0				\n"
-"	movco.l	%0, @%2				\n"
-"	bf	1b				\n"
-	: "=&z" (tmp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
+#include <asm/atomic-llsc.h>
 #else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v += i;
-	local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_sub	\n"
-"	sub	%1, %0				\n"
-"	movco.l	%0, @%2				\n"
-"	bf	1b				\n"
-	: "=&z" (tmp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v -= i;
-	local_irq_restore(flags);
+#include <asm/atomic-irq.h>
 #endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_add_return	\n"
-"	add	%1, %0				\n"
-"	movco.l	%0, @%2				\n"
-"	bf	1b				\n"
-"	synco					\n"
-	: "=&z" (temp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	temp = *(long *)v;
-	temp += i;
-	*(long *)v = temp;
-	local_irq_restore(flags);
-#endif
-
-	return temp;
-}
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_sub_return	\n"
-"	sub	%1, %0				\n"
-"	movco.l	%0, @%2				\n"
-"	bf	1b				\n"
-"	synco					\n"
-	: "=&z" (temp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	temp = *(long *)v;
-	temp -= i;
-	*(long *)v = temp;
-	local_irq_restore(flags);
-#endif
-
-	return temp;
-}
-
 #define atomic_dec_return(v)		atomic_sub_return(1,(v))
 #define atomic_inc_return(v)		atomic_add_return(1,(v))
 
@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
-"	and	%1, %0				\n"
-"	movco.l	%0, @%2				\n"
-"	bf	1b				\n"
-	: "=&z" (tmp)
-	: "r" (~mask), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v &= ~mask;
-	local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_set_mask	\n"
-"	or	%1, %0				\n"
-"	movco.l	%0, @%2				\n"
-"	bf	1b				\n"
-	: "=&z" (tmp)
-	: "r" (mask), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v |= mask;
-	local_irq_restore(flags);
-#endif
-}
-
 /* Atomic operations are already serializing on SH */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h
index 1b4fc52a59e8..2f89dd06d0cd 100644
--- a/include/asm-sh/bug.h
+++ b/include/asm-sh/bug.h
@@ -1,19 +1,54 @@
 #ifndef __ASM_SH_BUG_H
 #define __ASM_SH_BUG_H
 
-
 #ifdef CONFIG_BUG
-/*
- * Tell the user there is some problem.
- */
-#define BUG() do { \
-	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
-	*(volatile int *)0 = 0; \
+
+struct bug_frame {
+	unsigned short	opcode;
+	unsigned short	line;
+	const char	*file;
+	const char	*func;
+};
+
+struct pt_regs;
+
+extern void handle_BUG(struct pt_regs *);
+
+#define TRAPA_BUG_OPCODE	0xc33e	/* trapa #0x3e */
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define BUG()				\
+do {					\
+	__asm__ __volatile__ (		\
+		".align	2\n\t"		\
+		".short	%O0\n\t"	\
+		".long	%O1\n\t"	\
+		".long	%O2\n\t"	\
+		".long	%O3\n\t"	\
+		:			\
+		: "n" (TRAPA_BUG_OPCODE),	\
+		  "i" (__LINE__), "X" (__FILE__),	\
+		  "X" (__FUNCTION__));	\
+} while (0)
+
+#else
+
+#define BUG()				\
+do {					\
+	__asm__ __volatile__ (		\
+		".align	2\n\t"		\
+		".short	%O0\n\t"	\
+		:			\
+		: "n" (TRAPA_BUG_OPCODE));	\
 } while (0)
 
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
 #define HAVE_ARCH_BUG
-#endif
+
+#endif /* CONFIG_BUG */
 
 #include <asm-generic/bug.h>
 
-#endif
+#endif /* __ASM_SH_BUG_H */
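Since TRAPA_BUG_OPCODE is the first field of the frame, the trapa instruction itself anchors the metadata: when the trap fires, the saved PC points straight at a struct bug_frame embedded in .text. A hedged sketch of the decode step a handler could perform (condensed and illustrative; only struct bug_frame and TRAPA_BUG_OPCODE come from the header above, the printk format is ours):

	/* Illustrative decoder: copy the frame out of the instruction
	 * stream at the trapped PC and report its file/line/function. */
	static void report_bug_frame(struct pt_regs *regs)
	{
		struct bug_frame f;

		memcpy(&f, (void *)regs->pc, sizeof(f));

		if (f.opcode == TRAPA_BUG_OPCODE)
			printk(KERN_ALERT "kernel BUG at %s:%d [%s()]!\n",
			       f.file, f.line, f.func);
	}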
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index 795047da5e17..a294997a8412 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -16,9 +16,8 @@
 
 static void __init check_bugs(void)
 {
-	extern char *get_cpu_subtype(void);
 	extern unsigned long loops_per_jiffy;
-	char *p= &init_utsname()->machine[2]; /* "sh" */
+	char *p = &init_utsname()->machine[2]; /* "sh" */
 
 	cpu_data->loops_per_jiffy = loops_per_jiffy;
 
@@ -40,6 +39,15 @@ static void __init check_bugs(void)
 		*p++ = '4';
 		*p++ = 'a';
 		break;
+	case CPU_SH73180 ... CPU_SH7722:
+		*p++ = '4';
+		*p++ = 'a';
+		*p++ = 'l';
+		*p++ = '-';
+		*p++ = 'd';
+		*p++ = 's';
+		*p++ = 'p';
+		break;
 	default:
 		*p++ = '?';
 		*p++ = '!';
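The new case arm extends the utsname machine string the same way the existing SH-4A arm does. A standalone worked example (illustrative only): for an SH7722, the buffer that already holds "sh" gains '4','a','l','-','d','s','p', yielding "sh4al-dsp":

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		/* utsname()->machine is zero-filled and starts as "sh" */
		char machine[65] = "sh";
		char *p = &machine[2];

		*p++ = '4'; *p++ = 'a'; *p++ = 'l';
		*p++ = '-'; *p++ = 'd'; *p++ = 's'; *p++ = 'p';

		assert(strcmp(machine, "sh4al-dsp") == 0);
		return 0;
	}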
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index d44344c88e73..4bc8357e8892 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -34,25 +34,26 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
  */
 
 asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
-					    int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
+					    int len, __wsum sum,
+					    int *src_err_ptr, int *dst_err_ptr);
 
 /*
  * Note: when you get a NULL pointer exception here this means someone
  * passed in an incorrect kernel address to one of these functions.
  *
  * If you use these functions directly please don't forget the
  * access_ok().
  */
-static __inline__
+static inline
 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 				 int len, __wsum sum)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
-static __inline__
+static inline
 __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
 				   int len, __wsum sum, int *err_ptr)
 {
 	return csum_partial_copy_generic((__force const void *)src, dst,
 					 len, sum, err_ptr, NULL);
@@ -62,7 +63,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
  * Fold a partial checksum
  */
 
-static __inline__ __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("swap.w %0, %1\n\t"
@@ -85,7 +86,7 @@ static __inline__ __sum16 csum_fold(__wsum sum)
  * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
  * for linux by * Arnt Gulbrandsen.
  */
-static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, __dummy0, __dummy1;
 
@@ -113,10 +114,10 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	return csum_fold(sum);
 }
 
-static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-					    unsigned short len,
-					    unsigned short proto,
-					    __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
 #ifdef __LITTLE_ENDIAN__
 	unsigned long len_proto = (proto + len) << 8;
@@ -132,6 +133,7 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 	: "=r" (sum), "=r" (len_proto)
 	: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
 	: "t");
+
 	return sum;
 }
 
@@ -139,30 +141,28 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-					    unsigned short len,
-					    unsigned short proto,
-					    __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-
-static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
-	return csum_fold (csum_partial(buff, len, 0));
+	return csum_fold(csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-#ifdef CONFIG_IPV6
-static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
-					   const struct in6_addr *daddr,
-					   __u32 len, unsigned short proto,
-					   __wsum sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				       const struct in6_addr *daddr,
+				       __u32 len, unsigned short proto,
+				       __wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("clrt\n\t"
@@ -187,22 +187,21 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
187 "movt %1\n\t" 187 "movt %1\n\t"
188 "add %1, %0\n" 188 "add %1, %0\n"
189 : "=r" (sum), "=&r" (__dummy) 189 : "=r" (sum), "=&r" (__dummy)
190 : "r" (saddr), "r" (daddr), 190 : "r" (saddr), "r" (daddr),
191 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum) 191 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
192 : "t"); 192 : "t");
193 193
194 return csum_fold(sum); 194 return csum_fold(sum);
195} 195}
196#endif
197 196
198/* 197/*
199 * Copy and checksum to user 198 * Copy and checksum to user
200 */ 199 */
201#define HAVE_CSUM_COPY_USER 200#define HAVE_CSUM_COPY_USER
202static __inline__ __wsum csum_and_copy_to_user (const void *src, 201static inline __wsum csum_and_copy_to_user(const void *src,
203 void __user *dst, 202 void __user *dst,
204 int len, __wsum sum, 203 int len, __wsum sum,
205 int *err_ptr) 204 int *err_ptr)
206{ 205{
207 if (access_ok(VERIFY_WRITE, dst, len)) 206 if (access_ok(VERIFY_WRITE, dst, len))
208 return csum_partial_copy_generic((__force const void *)src, 207 return csum_partial_copy_generic((__force const void *)src,
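Most of these hunks are a mechanical __inline__ -> inline cleanup, but the csum_fold() they touch deserves a gloss: it folds a 32-bit running sum down to 16 bits with end-around carry, then complements the result. A portable C equivalent of what the swap.w/addc assembly computes (illustrative only, not from the patch):

	#include <stdint.h>

	static uint16_t csum_fold_c(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb the final carry  */
		return (uint16_t)~sum;			/* one's complement        */
	}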
diff --git a/include/asm-sh/cpu-sh4/cache.h b/include/asm-sh/cpu-sh4/cache.h
index 6e9c7e6ee8e4..f92b20a0983d 100644
--- a/include/asm-sh/cpu-sh4/cache.h
+++ b/include/asm-sh/cpu-sh4/cache.h
@@ -22,7 +22,7 @@
 #define CCR_CACHE_ICE	0x0100	/* Instruction Cache Enable */
 #define CCR_CACHE_ICI	0x0800	/* IC Invalidate */
 #define CCR_CACHE_IIX	0x8000	/* IC Index Enable */
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
+#ifndef CONFIG_CPU_SH4A
 #define CCR_CACHE_EMODE	0x80000000	/* EMODE Enable */
 #endif
 
diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h
index ef2b9b1ae41f..602d061ca2dc 100644
--- a/include/asm-sh/cpu-sh4/freq.h
+++ b/include/asm-sh/cpu-sh4/freq.h
@@ -10,7 +10,7 @@
 #ifndef __ASM_CPU_SH4_FREQ_H
 #define __ASM_CPU_SH4_FREQ_H
 
-#if defined(CONFIG_CPU_SUBTYPE_SH73180)
+#if defined(CONFIG_CPU_SUBTYPE_SH73180) || defined(CONFIG_CPU_SUBTYPE_SH7722)
 #define FRQCR		0xa4150000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 #define FRQCR		0xffc80000
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 37ab0c131a4d..8d0867b98e05 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -67,7 +67,7 @@ static inline dma_addr_t dma_map_single(struct device *dev,
 	if (dev->bus == &pci_bus_type)
 		return virt_to_bus(ptr);
 #endif
-	dma_cache_sync(ptr, size, dir);
+	dma_cache_sync(dev, ptr, size, dir);
 
 	return virt_to_bus(ptr);
 }
@@ -81,7 +81,7 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
 
 	for (i = 0; i < nents; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, dir);
 #endif
 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
@@ -112,7 +112,7 @@ static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
 	if (dev->bus == &pci_bus_type)
 		return;
 #endif
-	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+	dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
 }
 
 static inline void dma_sync_single_range(struct device *dev,
@@ -124,7 +124,7 @@ static inline void dma_sync_single_range(struct device *dev,
 	if (dev->bus == &pci_bus_type)
 		return;
 #endif
-	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+	dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
 }
 
 static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
@@ -134,7 +134,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
 
 	for (i = 0; i < nelems; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
 			       sg[i].length, dir);
 #endif
 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
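Every hunk above is the same mechanical fix from the merged "sh: Fixup dma_cache_sync() callers." commit: dma_cache_sync() grew a struct device * first argument in the 2.6.20-era DMA API, so each call site now passes the device through. A hedged driver-side sketch (function and buffer names are illustrative):

	/* Flush CPU writes to a descriptor so the device sees them;
	 * the device pointer now travels with the call. */
	static void publish_descriptor(struct device *dev, void *desc,
				       size_t len)
	{
		dma_cache_sync(dev, desc, len, DMA_TO_DEVICE);
	}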
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index fd576088e47e..bff965ef4b95 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -37,7 +37,8 @@
 # define ONCHIP_NR_IRQS 144
 #elif defined(CONFIG_CPU_SUBTYPE_SH7300) || \
       defined(CONFIG_CPU_SUBTYPE_SH73180) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7343)
+      defined(CONFIG_CPU_SUBTYPE_SH7343) || \
+      defined(CONFIG_CPU_SUBTYPE_SH7722)
 # define ONCHIP_NR_IRQS 109
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 # define ONCHIP_NR_IRQS 111
@@ -79,6 +80,8 @@
 # define OFFCHIP_NR_IRQS 16
 #elif defined(CONFIG_SH_7343_SOLUTION_ENGINE)
 # define OFFCHIP_NR_IRQS 12
+#elif defined(CONFIG_SH_7722_SOLUTION_ENGINE)
+# define OFFCHIP_NR_IRQS 14
 #elif defined(CONFIG_SH_UNKNOWN)
 # define OFFCHIP_NR_IRQS 16	/* Must also be last */
 #else
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index c84901dbd8e5..036ca2843866 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -508,16 +508,50 @@ struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
 			     unsigned long address, pte_t pte);
 
-/* Encode and de-code a swap entry */
 /*
+ * Encode and de-code a swap entry
+ *
+ * Constraints:
+ *	_PAGE_FILE at bit 0
+ *	_PAGE_PRESENT at bit 8
+ *	_PAGE_PROTNONE at bit 9
+ *
+ * For the normal case, we encode the swap type into bits 0:7 and the
+ * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
+ * preserved bits in the low 32-bits and use the upper 32 as the swap
+ * offset (along with a 5-bit type), following the same approach as x86
+ * PAE. This keeps the logic quite simple, and allows for a full 32
+ * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
+ * in the pte_low case.
+ *
+ * As is evident by the Alpha code, if we ever get a 64-bit unsigned
+ * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
+ * much cleaner..
+ *
  * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
  *       and _PAGE_PROTNONE bits
  */
-#define __swp_type(x)		((x).val & 0xff)
-#define __swp_offset(x)		((x).val >> 10)
-#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 10) })
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 1 })
-#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 1 })
+#ifdef CONFIG_X2TLB
+#define __swp_type(x)			((x).val & 0x1f)
+#define __swp_offset(x)			((x).val >> 5)
+#define __swp_entry(type, offset)	((swp_entry_t){ (type) | (offset) << 5})
+#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
+#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
+
+/*
+ * Encode and decode a nonlinear file mapping entry
+ */
+#define pte_to_pgoff(pte)		((pte).pte_high)
+#define pgoff_to_pte(off)		((pte_t) { _PAGE_FILE, (off) })
+
+#define PTE_FILE_MAX_BITS		32
+#else
+#define __swp_type(x)			((x).val & 0xff)
+#define __swp_offset(x)			((x).val >> 10)
+#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) <<10})
+
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 1 })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 1 })
 
 /*
  * Encode and decode a nonlinear file mapping entry
@@ -525,6 +559,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 #define PTE_FILE_MAX_BITS	29
 #define pte_to_pgoff(pte)	(pte_val(pte) >> 1)
 #define pgoff_to_pte(off)	((pte_t) { ((off) << 1) | _PAGE_FILE })
+#endif
 
 typedef pte_t *pte_addr_t;
 
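For the classic (non-X2TLB) layout, the left shift in __swp_entry_to_pte() is what keeps zeroes in the _PAGE_PRESENT and _PAGE_PROTNONE positions the comment calls out. A standalone round-trip sketch with the kernel types stripped out (the values are arbitrary; the real type field is further limited by MAX_SWAPFILES):

	#include <assert.h>

	int main(void)
	{
		unsigned long type = 0x12, offset = 0x3456;

		unsigned long val = type | (offset << 10);	/* __swp_entry()        */
		unsigned long pte = val << 1;			/* __swp_entry_to_pte() */

		assert(((pte >> 1) & 0xff) == type);		/* __swp_type()   */
		assert(((pte >> 1) >> 10) == offset);		/* __swp_offset() */
		return 0;
	}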
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index 6f1dd7ca1b1d..e29f2abb92de 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -27,6 +27,8 @@
 #define CCN_CVR		0xff000040
 #define CCN_PRR		0xff000044
 
+const char *get_cpu_subtype(void);
+
 /*
  * CPU type and hardware bug flags. Kept separately for each CPU.
  *
@@ -52,8 +54,10 @@ enum cpu_type {
 	CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501,
 
 	/* SH-4A types */
-	CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781,
-	CPU_SH7785,
+	CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785,
+
+	/* SH4AL-DSP types */
+	CPU_SH73180, CPU_SH7343, CPU_SH7722,
 
 	/* Unknown subtype */
 	CPU_SH_NONE
diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h
index dfc6bad567f0..4903f9e52dd8 100644
--- a/include/asm-sh/push-switch.h
+++ b/include/asm-sh/push-switch.h
@@ -4,6 +4,7 @@
 #include <linux/timer.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
+#include <linux/platform_device.h>
 
 struct push_switch {
 	/* switch state */
@@ -12,6 +13,8 @@ struct push_switch {
 	struct timer_list debounce;
 	/* workqueue */
 	struct work_struct work;
+	/* platform device, for workqueue handler */
+	struct platform_device *pdev;
 };
 
 struct push_switch_platform_info {
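The new pdev member exists because a workqueue handler only receives its work_struct: the handler recovers the push_switch via container_of() and then needs a way back to the owning device. A hedged sketch (the handler body is illustrative; only the struct layout comes from the header above):

	static void push_switch_work_sketch(struct work_struct *work)
	{
		struct push_switch *psw =
			container_of(work, struct push_switch, work);

		/* reach the platform device the switch hangs off */
		dev_dbg(&psw->pdev->dev, "switch state changed\n");
	}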