Diffstat (limited to 'include/asm-mips')
 include/asm-mips/addrspace.h                                     |  40
 include/asm-mips/atomic.h                                        |  49
 include/asm-mips/barrier.h                                       | 132
 include/asm-mips/bitops.h                                        |  92
 include/asm-mips/bootinfo.h                                      |   1
 include/asm-mips/bug.h                                           |  12
 include/asm-mips/cacheflush.h                                    |   2
 include/asm-mips/checksum.h                                      |  74
 include/asm-mips/compat.h                                        |  69
 include/asm-mips/cpu-info.h                                      |  10
 include/asm-mips/dec/kn02.h                                      |   2
 include/asm-mips/device.h                                        |   7
 include/asm-mips/dma-mapping.h                                   |   4
 include/asm-mips/dma.h                                           |   2
 include/asm-mips/futex.h                                         |  26
 include/asm-mips/gt64120.h                                       |  14
 include/asm-mips/highmem.h                                       |  10
 include/asm-mips/i8259.h                                         |  37
 include/asm-mips/io.h                                            |   2
 include/asm-mips/irq.h                                           |  14
 include/asm-mips/kexec.h                                         |  32
 include/asm-mips/mach-cobalt/cobalt.h                            |  29
 include/asm-mips/mach-cobalt/mach-gt64120.h                      |  28
 include/asm-mips/mach-ip27/irq.h                                 |   2
 include/asm-mips/mach-ip27/topology.h                            |   1
 include/asm-mips/{mach-rm200 => mach-rm}/cpu-feature-overrides.h |   0
 include/asm-mips/{mach-rm200 => mach-rm}/mc146818rtc.h           |   0
 include/asm-mips/{mach-rm200 => mach-rm}/timex.h                 |   0
 include/asm-mips/mipsregs.h                                      |  56
 include/asm-mips/page.h                                          |  32
 include/asm-mips/pci.h                                           |   6
 include/asm-mips/pgtable-32.h                                    |   6
 include/asm-mips/pgtable-64.h                                    |  17
 include/asm-mips/pgtable.h                                       |   2
 include/asm-mips/ptrace.h                                        |  10
 include/asm-mips/setup.h                                         |   2
 include/asm-mips/sn/arch.h                                       |   1
 include/asm-mips/sn/klconfig.h                                   |   4
 include/asm-mips/spinlock.h                                      |  53
 include/asm-mips/system.h                                        | 165
 include/asm-mips/termbits.h                                      |  11
 include/asm-mips/time.h                                          |   9
 include/asm-mips/types.h                                         |  10
 include/asm-mips/unistd.h                                        | 262
 include/asm-mips/war.h                                           |   2
 45 files changed, 591 insertions(+), 748 deletions(-)
diff --git a/include/asm-mips/addrspace.h b/include/asm-mips/addrspace.h
index 45c706e34df1..c6275088cf65 100644
--- a/include/asm-mips/addrspace.h
+++ b/include/asm-mips/addrspace.h
@@ -19,12 +19,16 @@
 #define _ATYPE_
 #define _ATYPE32_
 #define _ATYPE64_
-#define _LLCONST_(x)	x
+#define _CONST64_(x)	x
 #else
 #define _ATYPE_		__PTRDIFF_TYPE__
 #define _ATYPE32_	int
-#define _ATYPE64_	long long
-#define _LLCONST_(x)	x ## LL
+#define _ATYPE64_	__s64
+#ifdef CONFIG_64BIT
+#define _CONST64_(x)	x ## L
+#else
+#define _CONST64_(x)	x ## LL
+#endif
 #endif
 
 /*
@@ -48,7 +52,7 @@
  */
 #define CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)
 #define XPHYSADDR(a)		((_ACAST64_(a)) &			\
-				 _LLCONST_(0x000000ffffffffff))
+				 _CONST64_(0x000000ffffffffff))
 
 #ifdef CONFIG_64BIT
 
@@ -57,14 +61,14 @@
  * The compatibility segments use the full 64-bit sign extended value.  Note
  * the R8000 doesn't have them so don't reference these in generic MIPS code.
  */
-#define XKUSEG			_LLCONST_(0x0000000000000000)
-#define XKSSEG			_LLCONST_(0x4000000000000000)
-#define XKPHYS			_LLCONST_(0x8000000000000000)
-#define XKSEG			_LLCONST_(0xc000000000000000)
-#define CKSEG0			_LLCONST_(0xffffffff80000000)
-#define CKSEG1			_LLCONST_(0xffffffffa0000000)
-#define CKSSEG			_LLCONST_(0xffffffffc0000000)
-#define CKSEG3			_LLCONST_(0xffffffffe0000000)
+#define XKUSEG			_CONST64_(0x0000000000000000)
+#define XKSSEG			_CONST64_(0x4000000000000000)
+#define XKPHYS			_CONST64_(0x8000000000000000)
+#define XKSEG			_CONST64_(0xc000000000000000)
+#define CKSEG0			_CONST64_(0xffffffff80000000)
+#define CKSEG1			_CONST64_(0xffffffffa0000000)
+#define CKSSEG			_CONST64_(0xffffffffc0000000)
+#define CKSEG3			_CONST64_(0xffffffffe0000000)
 
 #define CKSEG0ADDR(a)		(CPHYSADDR(a) | CKSEG0)
 #define CKSEG1ADDR(a)		(CPHYSADDR(a) | CKSEG1)
@@ -122,7 +126,7 @@
 #define PHYS_TO_XKSEG_UNCACHED(p)	PHYS_TO_XKPHYS(K_CALG_UNCACHED,(p))
 #define PHYS_TO_XKSEG_CACHED(p)		PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE,(p))
 #define XKPHYS_TO_PHYS(p)		((p) & TO_PHYS_MASK)
-#define PHYS_TO_XKPHYS(cm,a)		(_LLCONST_(0x8000000000000000) | \
+#define PHYS_TO_XKPHYS(cm,a)		(_CONST64_(0x8000000000000000) | \
 					 ((cm)<<59) | (a))
 
 #if defined (CONFIG_CPU_R4300) \
@@ -132,20 +136,20 @@
     || defined (CONFIG_CPU_NEVADA) \
     || defined (CONFIG_CPU_TX49XX) \
     || defined (CONFIG_CPU_MIPS64)
-#define TO_PHYS_MASK	_LLCONST_(0x0000000fffffffff)	/* 2^^36 - 1 */
+#define TO_PHYS_MASK	_CONST64_(0x0000000fffffffff)	/* 2^^36 - 1 */
 #endif
 
 #if defined (CONFIG_CPU_R8000)
 /* We keep KUSIZE consistent with R4000 for now (2^^40) instead of (2^^48) */
-#define TO_PHYS_MASK	_LLCONST_(0x000000ffffffffff)	/* 2^^40 - 1 */
+#define TO_PHYS_MASK	_CONST64_(0x000000ffffffffff)	/* 2^^40 - 1 */
 #endif
 
 #if defined (CONFIG_CPU_R10000)
-#define TO_PHYS_MASK	_LLCONST_(0x000000ffffffffff)	/* 2^^40 - 1 */
+#define TO_PHYS_MASK	_CONST64_(0x000000ffffffffff)	/* 2^^40 - 1 */
 #endif
 
 #if defined(CONFIG_CPU_SB1) || defined(CONFIG_CPU_SB1A)
-#define TO_PHYS_MASK	_LLCONST_(0x00000fffffffffff)	/* 2^^44 - 1 */
+#define TO_PHYS_MASK	_CONST64_(0x00000fffffffffff)	/* 2^^44 - 1 */
 #endif
 
 #ifndef CONFIG_CPU_R8000
@@ -155,7 +159,7 @@
  * in order to catch bugs in the source code.
  */
 
-#define COMPAT_K1BASE32		_LLCONST_(0xffffffffa0000000)
+#define COMPAT_K1BASE32		_CONST64_(0xffffffffa0000000)
 #define PHYS_TO_COMPATK1(x)	((x) | COMPAT_K1BASE32)	/* 32-bit compat k1 */
 
 #endif
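
The rename from _LLCONST_() to _CONST64_() matters because a 64-bit kernel wants the plain "L" suffix (native 64-bit long) while 32-bit builds still need "LL". A minimal user-space sketch of the same selection, assuming a C99 compiler and an LP64 64-bit target; the CONST64 name is illustrative, not from the patch:

	#include <stdint.h>
	#include <stdio.h>

	#if UINTPTR_MAX > 0xffffffffUL		/* 64-bit (LP64): long is 64 bits */
	#define CONST64(x) x ## L
	#else					/* 32-bit: need long long */
	#define CONST64(x) x ## LL
	#endif

	int main(void)
	{
		/* 40-bit physical address mask, as used by XPHYSADDR() above */
		unsigned long long mask = CONST64(0x000000ffffffffff);

		printf("constant is %d bits wide\n", (int)sizeof(CONST64(1)) * 8);
		printf("mask = %llx\n", mask);
		return 0;
	}

Either branch yields a genuinely 64-bit constant; the point of the split is that the type matches the native long on 64-bit kernels.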
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index e64abc0d8221..c1a2409bb52a 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -9,20 +9,13 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle
+ * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
  */
-
-/*
- * As workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
- * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
- * main big wrapper ...
- */
-#include <linux/spinlock.h>
-
 #ifndef _ASM_ATOMIC_H
 #define _ASM_ATOMIC_H
 
 #include <linux/irqflags.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
 
@@ -138,6 +131,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -148,7 +143,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -163,7 +157,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -178,6 +171,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -185,6 +180,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -195,7 +192,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -210,7 +206,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -225,6 +220,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -240,6 +237,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -253,7 +252,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -272,7 +270,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -289,6 +286,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -383,7 +382,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 
 #ifdef CONFIG_64BIT
 
-typedef struct { volatile __s64 counter; } atomic64_t;
+typedef struct { volatile long counter; } atomic64_t;
 
 #define ATOMIC64_INIT(i)    { (i) }
 
@@ -492,6 +491,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -502,7 +503,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -517,7 +517,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -532,6 +531,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -539,6 +540,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -549,7 +552,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -564,7 +566,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -579,6 +580,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -594,6 +597,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -607,7 +612,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqzl	%0, 1b					\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -626,7 +630,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqz	%0, 1b					\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -643,6 +646,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
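The net effect is that the barrier moves out of the LL/SC loop bodies: instead of a sync inside every inline-asm sequence, each value-returning atomic is now bracketed by smp_mb(), which expands to sync only under CONFIG_WEAK_ORDERING && CONFIG_SMP and to nothing otherwise. A hedged sketch of the ordering contract a caller can now rely on (refcount and data are illustrative names, not from the patch):

	static atomic_t refcount = ATOMIC_INIT(0);
	static int data;

	void publish(void)
	{
		data = 42;			/* plain store */
		/*
		 * atomic_add_return() is now a full barrier: the store to
		 * data above cannot be reordered past the counter update,
		 * and later reads cannot move before it either.
		 */
		atomic_add_return(1, &refcount);
	}
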
diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h
new file mode 100644
index 000000000000..ed82631b0017
--- /dev/null
+++ b/include/asm-mips/barrier.h
@@ -0,0 +1,132 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+/*
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
+
+#define read_barrier_depends()		do { } while(0)
+#define smp_read_barrier_depends()	do { } while(0)
+
+#ifdef CONFIG_CPU_HAS_SYNC
+#define __sync()				\
+	__asm__ __volatile__(			\
+		".set	push\n\t"		\
+		".set	noreorder\n\t"		\
+		".set	mips2\n\t"		\
+		"sync\n\t"			\
+		".set	pop"			\
+		: /* no output */		\
+		: /* no input */		\
+		: "memory")
+#else
+#define __sync()	do { } while(0)
+#endif
+
+#define __fast_iob()				\
+	__asm__ __volatile__(			\
+		".set	push\n\t"		\
+		".set	noreorder\n\t"		\
+		"lw	$0,%0\n\t"		\
+		"nop\n\t"			\
+		".set	pop"			\
+		: /* no output */		\
+		: "m" (*(int *)CKSEG1)		\
+		: "memory")
+
+#define fast_wmb()	__sync()
+#define fast_rmb()	__sync()
+#define fast_mb()	__sync()
+#define fast_iob()				\
+	do {					\
+		__sync();			\
+		__fast_iob();			\
+	} while (0)
+
+#ifdef CONFIG_CPU_HAS_WB
+
+#include <asm/wbflush.h>
+
+#define wmb()		fast_wmb()
+#define rmb()		fast_rmb()
+#define mb()		wbflush()
+#define iob()		wbflush()
+
+#else /* !CONFIG_CPU_HAS_WB */
+
+#define wmb()		fast_wmb()
+#define rmb()		fast_rmb()
+#define mb()		fast_mb()
+#define iob()		fast_iob()
+
+#endif /* !CONFIG_CPU_HAS_WB */
+
+#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#define __WEAK_ORDERING_MB	"	sync	\n"
+#else
+#define __WEAK_ORDERING_MB	"		\n"
+#endif
+
+#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+
+#define set_mb(var, value) \
+	do { var = value; smp_mb(); } while (0)
+
+#endif /* __ASM_BARRIER_H */
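The smp_* variants above are what the rest of this series leans on; they are meant to pair a writer's smp_wmb() with a reader's smp_rmb(). A hedged usage sketch (buf, ready and compute() are illustrative names, not part of the patch):

	static int buf;
	static int ready;

	void writer(void)
	{
		buf = compute();	/* payload first ...               */
		smp_wmb();		/* ... ordered before the flag ... */
		ready = 1;		/* ... which publishes it          */
	}

	int reader(void)
	{
		if (!ready)
			return -1;	/* nothing published yet */
		smp_rmb();		/* pairs with the writer's smp_wmb() */
		return buf;		/* sees the published payload, not stale data */
	}
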
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 1bb89c5a10ee..06445de1324b 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -3,38 +3,34 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
+ * Copyright (c) 1994 - 1997, 1999, 2000, 06  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  */
 #ifndef _ASM_BITOPS_H
 #define _ASM_BITOPS_H
 
 #include <linux/compiler.h>
+#include <linux/irqflags.h>
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/bug.h>
 #include <asm/byteorder.h>		/* sigh ... */
 #include <asm/cpu-features.h>
+#include <asm/sgidefs.h>
+#include <asm/war.h>
 
 #if (_MIPS_SZLONG == 32)
 #define SZLONG_LOG 5
 #define SZLONG_MASK 31UL
 #define __LL		"ll	"
 #define __SC		"sc	"
-#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
 #elif (_MIPS_SZLONG == 64)
 #define SZLONG_LOG 6
 #define SZLONG_MASK 63UL
 #define __LL		"lld	"
 #define __SC		"scd	"
-#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
 #endif
 
-#ifdef __KERNEL__
-
-#include <linux/irqflags.h>
-#include <asm/sgidefs.h>
-#include <asm/war.h>
-
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
@@ -42,20 +38,6 @@
 #define smp_mb__after_clear_bit()	smp_mb()
 
 /*
- * Only disable interrupt for kernel mode stuff to keep usermode stuff
- * that dares to use kernel include files alive.
- */
-
-#define __bi_flags			unsigned long flags
-#define __bi_local_irq_save(x)		local_irq_save(x)
-#define __bi_local_irq_restore(x)	local_irq_restore(x)
-#else
-#define __bi_flags
-#define __bi_local_irq_save(x)
-#define __bi_local_irq_restore(x)
-#endif /* __KERNEL__ */
-
-/*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
@@ -93,13 +75,13 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		*a |= mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -141,13 +123,13 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		*a &= ~mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -191,13 +173,13 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		*a ^= mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -223,9 +205,6 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	" __SC	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -245,9 +224,6 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	" __SC	"%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -258,17 +234,19 @@ static inline int test_and_set_bit(unsigned long nr,
 		volatile unsigned long *a = addr;
 		unsigned long mask;
 		int retval;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a |= mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 
 		return retval;
 	}
+
+	smp_mb();
 }
 
 /*
@@ -294,9 +272,6 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"	" __SC	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -317,9 +292,6 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"	" __SC	"%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -330,17 +302,19 @@ static inline int test_and_clear_bit(unsigned long nr,
 		volatile unsigned long *a = addr;
 		unsigned long mask;
 		int retval;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a &= ~mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 
 		return retval;
 	}
+
+	smp_mb();
 }
 
 /*
@@ -365,9 +339,6 @@ static inline int test_and_change_bit(unsigned long nr,
 		"	" __SC	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -387,9 +358,6 @@ static inline int test_and_change_bit(unsigned long nr,
 		"	" __SC	"\t%2, %1				\n"
 		"	beqz	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -399,22 +367,20 @@ static inline int test_and_change_bit(unsigned long nr,
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask, retval;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a ^= mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 
 		return retval;
 	}
-}
 
-#undef __bi_flags
-#undef __bi_local_irq_save
-#undef __bi_local_irq_restore
+	smp_mb();
+}
 
 #include <asm-generic/bitops/non-atomic.h>
 
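With the conditional sync gone from the assembler and a full smp_mb() implied instead, the test_and_* operations remain safe to use as ad-hoc synchronization. A hedged sketch of a crude trylock built on test_and_set_bit() (BUSY and state are illustrative names, not from the patch):

	#define BUSY	0
	static unsigned long state;

	int try_enter(void)
	{
		/* the implied full barrier orders the critical section
		 * after a successful bit operation */
		if (test_and_set_bit(BUSY, &state))
			return 0;		/* someone else owns it */
		return 1;			/* we own the section now */
	}

	void leave(void)
	{
		smp_mb__before_clear_bit();	/* flush the critical section */
		clear_bit(BUSY, &state);
	}
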
diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h
index 1e5ccdad3b02..8e321f53a382 100644
--- a/include/asm-mips/bootinfo.h
+++ b/include/asm-mips/bootinfo.h
@@ -131,6 +131,7 @@
 #define  MACH_PHILIPS_NINO	0	/* Nino */
 #define  MACH_PHILIPS_VELO	1	/* Velo */
 #define  MACH_PHILIPS_JBS	2	/* JBS */
+#define  MACH_PHILIPS_STB810	3	/* STB810 */
 
 /*
  * Valid machtype for group SIBYTE
diff --git a/include/asm-mips/bug.h b/include/asm-mips/bug.h
index 7b4739dc8f3f..4d560a533940 100644
--- a/include/asm-mips/bug.h
+++ b/include/asm-mips/bug.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_BUG_H
 #define __ASM_BUG_H
 
+#include <asm/sgidefs.h>
 
 #ifdef CONFIG_BUG
 
@@ -13,6 +14,17 @@ do { \
 
 #define HAVE_ARCH_BUG
 
+#if (_MIPS_ISA > _MIPS_ISA_MIPS1)
+
+#define BUG_ON(condition)						\
+do {									\
+	__asm__ __volatile__("tne $0, %0" : : "r" (condition));	\
+} while (0)
+
+#define HAVE_ARCH_BUG_ON
+
+#endif /* _MIPS_ISA > _MIPS_ISA_MIPS1 */
+
 #endif
 
 #include <asm-generic/bug.h>
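On MIPS II and later the new BUG_ON() is a single conditional trap rather than a compare, a branch and an out-of-line BUG(). A hedged illustration of what callers get (validate() and p are made-up names):

	void validate(void *p)
	{
		/*
		 * Expands to roughly:  tne $0, <reg>
		 * i.e. trap if the register holding (p == NULL) is
		 * non-zero, so the common path costs one untaken
		 * trap instruction and no branch.
		 */
		BUG_ON(p == NULL);
	}
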
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index e3c9925876a3..0ddada3bb0b6 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -17,6 +17,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup_mm(mm) handles cache flushing when forking
  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *  - flush_icache_range(start, end) flush a range of instructions
@@ -31,6 +32,7 @@
 extern void (*flush_cache_all)(void);
 extern void (*__flush_cache_all)(void);
 extern void (*flush_cache_mm)(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
 extern void (*flush_cache_range)(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h
index a5e6050ec0f3..24cdcc6eaab8 100644
--- a/include/asm-mips/checksum.h
+++ b/include/asm-mips/checksum.h
@@ -27,47 +27,53 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+__wsum __csum_partial_copy_user(const void *src, void *dst,
+				int len, __wsum sum, int *err_ptr);
 
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
  */
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-					 unsigned char *dst, int len,
-					 unsigned int sum, int *errp);
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+				   __wsum sum, int *err_ptr)
+{
+	might_sleep();
+	return __csum_partial_copy_user((__force void *)src, dst,
+					len, sum, err_ptr);
+}
 
 /*
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static inline unsigned int csum_and_copy_to_user (const unsigned char *src,
-						  unsigned char __user *dst,
-						  int len, int sum,
-						  int *err_ptr)
+static inline
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
+			     __wsum sum, int *err_ptr)
 {
 	might_sleep();
-	sum = csum_partial(src, len, sum);
-
-	if (copy_to_user(dst, src, len)) {
+	if (access_ok(VERIFY_WRITE, dst, len))
+		return __csum_partial_copy_user(src, (__force void *)dst,
+						len, sum, err_ptr);
+	if (len)
 		*err_ptr = -EFAULT;
-		return -1;
-	}
 
-	return sum;
+	return (__force __wsum)-1;	/* invalid checksum */
 }
 
 /*
  * the same as csum_partial, but copies from user space (but on MIPS
  * we have just one address space, so this is identical to the above)
  */
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
-				       int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum);
 
 /*
  * Fold a partial checksum without adding pseudo headers
  */
-static inline unsigned short int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 	"	.set	push		# csum_fold\n"
@@ -82,7 +88,7 @@ static inline unsigned short int csum_fold(unsigned int sum)
 	: "=r" (sum)
 	: "0" (sum));
 
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
@@ -92,10 +98,10 @@ static inline unsigned short int csum_fold(unsigned int sum)
  *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
  *	Arnt Gulbrandsen.
  */
-static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	unsigned int *word = (unsigned int *) iph;
-	unsigned int *stop = word + ihl;
+	const unsigned int *word = iph;
+	const unsigned int *stop = word + ihl;
 	unsigned int csum;
 	int carry;
 
@@ -123,9 +129,9 @@ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
 	return csum_fold(csum);
 }
 
-static inline unsigned int csum_tcpudp_nofold(unsigned long saddr,
-	unsigned long daddr, unsigned short len, unsigned short proto,
-	unsigned int sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr,
+	__be32 daddr, unsigned short len, unsigned short proto,
+	__wsum sum)
 {
 	__asm__(
 	"	.set	push		# csum_tcpudp_nofold\n"
@@ -155,9 +161,9 @@ static inline unsigned int csum_tcpudp_nofold(unsigned long saddr,
 	: "=r" (sum)
 	: "0" (daddr), "r"(saddr),
 #ifdef __MIPSEL__
-	  "r" (((unsigned long)htons(len)<<16) + proto*256),
+	  "r" ((proto + len) << 8),
 #else
-	  "r" (((unsigned long)(proto)<<16) + len),
+	  "r" (proto + len),
 #endif
 	  "r" (sum));
 
@@ -168,11 +174,10 @@ static inline unsigned int csum_tcpudp_nofold(unsigned long saddr,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
@@ -181,17 +186,16 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u32 len,
-						     unsigned short proto,
-						     unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
 {
 	__asm__(
 	"	.set	push		# csum_ipv6_magic\n"
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 900f472fdd2b..432653d7ae09 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -5,6 +5,7 @@
  */
 #include <linux/types.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 
 #define COMPAT_USER_HZ	100
 
@@ -32,6 +33,7 @@ typedef struct {
 	s32	val[2];
 } compat_fsid_t;
 typedef s32		compat_timer_t;
+typedef s32		compat_key_t;
 
 typedef s32		compat_int_t;
 typedef s32		compat_long_t;
@@ -146,4 +148,71 @@ static inline void __user *compat_alloc_user_space(long len)
 	return (void __user *) (regs->regs[29] - len);
 }
 
+struct compat_ipc64_perm {
+	compat_key_t key;
+	__compat_uid32_t uid;
+	__compat_gid32_t gid;
+	__compat_uid32_t cuid;
+	__compat_gid32_t cgid;
+	compat_mode_t mode;
+	unsigned short seq;
+	unsigned short __pad2;
+	compat_ulong_t __unused1;
+	compat_ulong_t __unused2;
+};
+
+struct compat_semid64_ds {
+	struct compat_ipc64_perm sem_perm;
+	compat_time_t	sem_otime;
+	compat_time_t	sem_ctime;
+	compat_ulong_t	sem_nsems;
+	compat_ulong_t	__unused1;
+	compat_ulong_t	__unused2;
+};
+
+struct compat_msqid64_ds {
+	struct compat_ipc64_perm msg_perm;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused1;
+#endif
+	compat_time_t	msg_stime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused1;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused2;
+#endif
+	compat_time_t	msg_rtime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused2;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused3;
+#endif
+	compat_time_t	msg_ctime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused3;
+#endif
+	compat_ulong_t	msg_cbytes;
+	compat_ulong_t	msg_qnum;
+	compat_ulong_t	msg_qbytes;
+	compat_pid_t	msg_lspid;
+	compat_pid_t	msg_lrpid;
+	compat_ulong_t	__unused4;
+	compat_ulong_t	__unused5;
+};
+
+struct compat_shmid64_ds {
+	struct compat_ipc64_perm shm_perm;
+	compat_size_t	shm_segsz;
+	compat_time_t	shm_atime;
+	compat_time_t	shm_dtime;
+	compat_time_t	shm_ctime;
+	compat_pid_t	shm_cpid;
+	compat_pid_t	shm_lpid;
+	compat_ulong_t	shm_nattch;
+	compat_ulong_t	__unused1;
+	compat_ulong_t	__unused2;
+};
+
 #endif /* _ASM_COMPAT_H */
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index a2f0c8ea9160..610d0cdeaa9e 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -22,12 +22,12 @@
  * Descriptor for a cache
  */
 struct cache_desc {
-	unsigned short linesz;	/* Size of line in bytes */
-	unsigned short ways;	/* Number of ways */
-	unsigned short sets;	/* Number of lines per set */
 	unsigned int waysize;	/* Bytes per way */
-	unsigned int waybit;	/* Bits to select in a cache set */
-	unsigned int flags;	/* Flags describing cache properties */
+	unsigned short sets;	/* Number of lines per set */
+	unsigned char ways;	/* Number of ways */
+	unsigned char linesz;	/* Size of line in bytes */
+	unsigned char waybit;	/* Bits to select in a cache set */
+	unsigned char flags;	/* Flags describing cache properties */
 };
 
 /*
diff --git a/include/asm-mips/dec/kn02.h b/include/asm-mips/dec/kn02.h
index 8319ad77b250..93430b5f4724 100644
--- a/include/asm-mips/dec/kn02.h
+++ b/include/asm-mips/dec/kn02.h
@@ -82,11 +82,9 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/spinlock.h>
 #include <linux/types.h>
 
 extern u32 cached_kn02_csr;
-extern spinlock_t kn02_lock;
 extern void init_kn02_irqs(int base);
 #endif
 
diff --git a/include/asm-mips/device.h b/include/asm-mips/device.h
new file mode 100644
index 000000000000..d8f9872b0e2d
--- /dev/null
+++ b/include/asm-mips/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index 43288634c38a..236d1a467cc7 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -63,9 +63,9 @@ dma_get_cache_alignment(void)
 	return 128;
 }
 
-extern int dma_is_consistent(dma_addr_t dma_addr);
+extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
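Both prototypes grow a struct device * so the DMA layer can make per-device coherence decisions instead of global ones. A hedged sketch of a caller adapted to the new signatures (flush_for_device, dev and buf are illustrative names, not from the patch):

	void flush_for_device(struct device *dev, void *buf, size_t len)
	{
		/* sync the CPU's view of buf before the device reads it */
		if (!dma_is_consistent(dev, virt_to_phys(buf)))
			dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
	}
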
diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h
index e85849ac165f..23f789c80845 100644
--- a/include/asm-mips/dma.h
+++ b/include/asm-mips/dma.h
@@ -74,7 +74,9 @@
  *
  */
 
+#ifndef GENERIC_ISA_DMA_SUPPORT_BROKEN
 #define MAX_DMA_CHANNELS	8
+#endif
 
 /*
  * The maximum address in KSEG0 that we can perform a DMA transfer to on this
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index ed023eae0674..47e5679c2353 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -1,19 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2006  Ralf Baechle (ralf@linux-mips.org)
+ */
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <asm/barrier.h>
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 #include <asm/war.h>
 
-#ifdef CONFIG_SMP
-#define __FUTEX_SMP_SYNC "	sync	\n"
-#else
-#define __FUTEX_SMP_SYNC
-#endif
-
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)		\
 {									\
 	if (cpu_has_llsc && R10000_LLSC_WAR) {				\
@@ -27,7 +29,7 @@
 		"	.set	mips3				\n"	\
 		"2:	sc	$1, %2				\n"	\
 		"	beqzl	$1, 1b				\n"	\
-		__FUTEX_SMP_SYNC					\
+		__WEAK_ORDERING_MB					\
 		"3:						\n"	\
 		"	.set	pop				\n"	\
 		"	.set	mips0				\n"	\
@@ -53,7 +55,7 @@
 		"	.set	mips3				\n"	\
 		"2:	sc	$1, %2				\n"	\
 		"	beqz	$1, 1b				\n"	\
-		__FUTEX_SMP_SYNC					\
+		__WEAK_ORDERING_MB					\
 		"3:						\n"	\
 		"	.set	pop				\n"	\
 		"	.set	mips0				\n"	\
@@ -86,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -113,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
@@ -150,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 		"	.set	mips3					\n"
 		"2:	sc	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-		__FUTEX_SMP_SYNC
+		__WEAK_ORDERING_MB
 		"3:							\n"
 		"	.set	pop					\n"
 		"	.section .fixup,\"ax\"				\n"
@@ -177,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 		"	.set	mips3					\n"
 		"2:	sc	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
-		__FUTEX_SMP_SYNC
+		__WEAK_ORDERING_MB
 		"3:							\n"
 		"	.set	pop					\n"
 		"	.section .fixup,\"ax\"				\n"
diff --git a/include/asm-mips/gt64120.h b/include/asm-mips/gt64120.h
index 2edd171bb6cd..4bf8e28f8850 100644
--- a/include/asm-mips/gt64120.h
+++ b/include/asm-mips/gt64120.h
@@ -451,6 +451,13 @@
 #define GT_SDRAM_OPMODE_OP_MODE		3
 #define GT_SDRAM_OPMODE_OP_CBR		4
 
+#define GT_TC_CONTROL_ENTC0_SHF		0
+#define GT_TC_CONTROL_ENTC0_MSK		(MSK(1) << GT_TC_CONTROL_ENTC0_SHF)
+#define GT_TC_CONTROL_ENTC0_BIT		GT_TC_CONTROL_ENTC0_MSK
+#define GT_TC_CONTROL_SELTC0_SHF	1
+#define GT_TC_CONTROL_SELTC0_MSK	(MSK(1) << GT_TC_CONTROL_SELTC0_SHF)
+#define GT_TC_CONTROL_SELTC0_BIT	GT_TC_CONTROL_SELTC0_MSK
+
 
 #define GT_PCI0_BARE_SWSCS3BOOTDIS_SHF	0
 #define GT_PCI0_BARE_SWSCS3BOOTDIS_MSK	(MSK(1) << GT_PCI0_BARE_SWSCS3BOOTDIS_SHF)
@@ -523,6 +530,13 @@
 #define GT_PCI0_CMD_SWORDSWAP_MSK	(MSK(1) << GT_PCI0_CMD_SWORDSWAP_SHF)
 #define GT_PCI0_CMD_SWORDSWAP_BIT	GT_PCI0_CMD_SWORDSWAP_MSK
 
+#define GT_INTR_T0EXP_SHF		8
+#define GT_INTR_T0EXP_MSK		(MSK(1) << GT_INTR_T0EXP_SHF)
+#define GT_INTR_T0EXP_BIT		GT_INTR_T0EXP_MSK
+#define GT_INTR_RETRYCTR0_SHF		20
+#define GT_INTR_RETRYCTR0_MSK		(MSK(1) << GT_INTR_RETRYCTR0_SHF)
+#define GT_INTR_RETRYCTR0_BIT		GT_INTR_RETRYCTR0_MSK
+
 /*
  * Misc
  */
diff --git a/include/asm-mips/highmem.h b/include/asm-mips/highmem.h
index c976bfaaba83..f8c8182f7f2e 100644
--- a/include/asm-mips/highmem.h
+++ b/include/asm-mips/highmem.h
@@ -21,6 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/uaccess.h>
 #include <asm/kmap_types.h>
 
 /* undef for production */
@@ -70,11 +71,16 @@ static inline void *kmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
+	pagefault_disable();
 	return page_address(page);
 }
 
-static inline void kunmap_atomic(void *kvaddr, enum km_type type) { }
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+static inline void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+	pagefault_enable();
+}
+
+#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 
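MIPS has no real highmem window to map here, but kmap_atomic()/kunmap_atomic() now disable and re-enable pagefaults so callers see the same atomicity rules as on architectures with true atomic kmaps. A hedged usage sketch (copy_out, page and dst are illustrative names):

	void copy_out(struct page *page, void *dst, size_t len)
	{
		void *src = kmap_atomic(page, KM_USER0);

		/* pagefault_disable() is in effect here: no sleeping and
		 * no faulting user access inside the mapping */
		memcpy(dst, src, len);
		kunmap_atomic(src, KM_USER0);
	}
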
diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h
index 0214abe3f0af..4df8d8b118c0 100644
--- a/include/asm-mips/i8259.h
+++ b/include/asm-mips/i8259.h
@@ -19,10 +19,31 @@
 
 #include <asm/io.h>
 
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD		0x20
+#define PIC_MASTER_IMR		0x21
+#define PIC_MASTER_ISR		PIC_MASTER_CMD
+#define PIC_MASTER_POLL		PIC_MASTER_ISR
+#define PIC_MASTER_OCW3		PIC_MASTER_ISR
+#define PIC_SLAVE_CMD		0xa0
+#define PIC_SLAVE_IMR		0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR		2
+#define MASTER_ICW4_DEFAULT	0x01
+#define SLAVE_ICW4_DEFAULT	0x01
+#define PIC_ICW4_AEOI		2
+
 extern spinlock_t i8259A_lock;
 
+extern void init_8259A(int auto_eoi);
+extern void enable_8259A_irq(unsigned int irq);
+extern void disable_8259A_irq(unsigned int irq);
+
 extern void init_i8259_irqs(void);
 
+#define I8259A_IRQ_BASE	0
+
 /*
  * Do the traditional i8259 interrupt polling thing.  This is for the few
  * cases where no better interrupt acknowledge method is available and we
@@ -35,15 +56,15 @@ static inline int i8259_irq(void)
 	spin_lock(&i8259A_lock);
 
 	/* Perform an interrupt acknowledge cycle on controller 1. */
-	outb(0x0C, 0x20);		/* prepare for poll */
-	irq = inb(0x20) & 7;
-	if (irq == 2) {
+	outb(0x0C, PIC_MASTER_CMD);	/* prepare for poll */
+	irq = inb(PIC_MASTER_CMD) & 7;
+	if (irq == PIC_CASCADE_IR) {
 		/*
 		 * Interrupt is cascaded so perform interrupt
 		 * acknowledge on controller 2.
 		 */
-		outb(0x0C, 0xA0);		/* prepare for poll */
-		irq = (inb(0xA0) & 7) + 8;
+		outb(0x0C, PIC_SLAVE_CMD);	/* prepare for poll */
+		irq = (inb(PIC_SLAVE_CMD) & 7) + 8;
 	}
 
 	if (unlikely(irq == 7)) {
@@ -54,14 +75,14 @@ static inline int i8259_irq(void)
 	 * significant bit is not set then there is no valid
 	 * interrupt.
 	 */
-		outb(0x0B, 0x20);		/* ISR register */
-		if(~inb(0x20) & 0x80)
+		outb(0x0B, PIC_MASTER_ISR);	/* ISR register */
+		if(~inb(PIC_MASTER_ISR) & 0x80)
 			irq = -1;
 	}
 
 	spin_unlock(&i8259A_lock);
 
-	return irq;
+	return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq;
 }
 
 #endif /* _ASM_I8259_H */
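Because i8259_irq() now folds in I8259A_IRQ_BASE, a platform dispatcher can hand its result straight to do_IRQ(). A hedged sketch (plat_irq_dispatch is the usual MIPS entry hook, but this body is illustrative, not from the patch):

	asmlinkage void plat_irq_dispatch(void)
	{
		int irq = i8259_irq();

		if (likely(irq >= 0))
			do_IRQ(irq);	/* already offset by I8259A_IRQ_BASE */
		else
			spurious_interrupt();
	}
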
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index bc5f3c53155f..d77b657c09c7 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -113,7 +113,7 @@ static inline void set_io_port_base(unsigned long base)
  * almost all conceivable cases a device driver should not be using
  * this function
  */
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile const void *address)
 {
 	return (unsigned long)address - PAGE_OFFSET;
 }
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 35a05ca5560c..386da82e5774 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -24,8 +24,6 @@ static inline int irq_canonicalize(int irq)
24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ 24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
25#endif 25#endif
26 26
27extern asmlinkage unsigned int do_IRQ(unsigned int irq);
28
29#ifdef CONFIG_MIPS_MT_SMTC 27#ifdef CONFIG_MIPS_MT_SMTC
30/* 28/*
31 * Clear interrupt mask handling "backstop" if irq_hwmask 29 * Clear interrupt mask handling "backstop" if irq_hwmask
@@ -33,18 +31,16 @@ extern asmlinkage unsigned int do_IRQ(unsigned int irq);
33 * functions will take over re-enabling the low-level mask. 31 * functions will take over re-enabling the low-level mask.
34 * Otherwise it will be done on return from exception. 32 * Otherwise it will be done on return from exception.
35 */ 33 */
36#define __DO_IRQ_SMTC_HOOK() \ 34#define __DO_IRQ_SMTC_HOOK(irq) \
37do { \ 35do { \
38 if (irq_hwmask[irq] & 0x0000ff00) \ 36 if (irq_hwmask[irq] & 0x0000ff00) \
39 write_c0_tccontext(read_c0_tccontext() & \ 37 write_c0_tccontext(read_c0_tccontext() & \
40 ~(irq_hwmask[irq] & 0x0000ff00)); \ 38 ~(irq_hwmask[irq] & 0x0000ff00)); \
41} while (0) 39} while (0)
42#else 40#else
43#define __DO_IRQ_SMTC_HOOK() do { } while (0) 41#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
44#endif 42#endif
45 43
46#ifdef CONFIG_PREEMPT
47
48/* 44/*
49 * do_IRQ handles all normal device IRQ's (the special 45 * do_IRQ handles all normal device IRQ's (the special
50 * SMP cross-CPU interrupts have their own specific 46 * SMP cross-CPU interrupts have their own specific
@@ -56,13 +52,11 @@ do { \
56#define do_IRQ(irq) \ 52#define do_IRQ(irq) \
57do { \ 53do { \
58 irq_enter(); \ 54 irq_enter(); \
59 __DO_IRQ_SMTC_HOOK(); \ 55 __DO_IRQ_SMTC_HOOK(irq); \
60 __do_IRQ((irq)); \ 56 generic_handle_irq(irq); \
61 irq_exit(); \ 57 irq_exit(); \
62} while (0) 58} while (0)
63 59
64#endif
65
66extern void arch_init_irq(void); 60extern void arch_init_irq(void);
67extern void spurious_interrupt(void); 61extern void spurious_interrupt(void);
68 62
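do_IRQ() now funnels into generic_handle_irq() and passes the irq number down to the SMTC backstop hook explicitly. A sketch of a cascade dispatcher built on it; board_hw0_dispatch and BOARD_IRQ_BASE are illustrative names, and the CP0 accessors come from <asm/mipsregs.h>:

#include <asm/irq.h>
#include <asm/mipsregs.h>

#define BOARD_IRQ_BASE	0	/* hypothetical base for this sketch */

static void board_hw0_dispatch(void)
{
	unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

	if (pending & CAUSEF_IP2)
		do_IRQ(BOARD_IRQ_BASE + 2);	/* irq_enter/exit + SMTC hook */
}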
diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h
new file mode 100644
index 000000000000..b25267ebcb09
--- /dev/null
+++ b/include/asm-mips/kexec.h
@@ -0,0 +1,32 @@
1/*
2 * kexec.h for kexec
3 * Created by <nschichan@corp.free.fr> on Thu Oct 12 14:59:34 2006
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#ifndef _MIPS_KEXEC
10# define _MIPS_KEXEC
11
12/* Maximum physical address we can use pages from */
13#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
14/* Maximum address we can reach in physical address mode */
15#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
16 /* Maximum address we can use for the control code buffer */
17#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
18
19#define KEXEC_CONTROL_CODE_SIZE 4096
20
21/* The native architecture */
22#define KEXEC_ARCH KEXEC_ARCH_MIPS
23
24#define MAX_NOTE_BYTES 1024
25
26static inline void crash_setup_regs(struct pt_regs *newregs,
27 struct pt_regs *oldregs)
28{
29 /* Dummy implementation for now */
30}
31
32#endif /* !_MIPS_KEXEC */
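crash_setup_regs() is only stubbed here. On architectures that already support kdump it copies the trap frame when one is available and otherwise snapshots the live registers; a hedged sketch of that eventual shape, not what this patch implements:

#include <linux/string.h>

static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs)
		memcpy(newregs, oldregs, sizeof(*newregs));
	/* else: capture the current GPRs and EPC into *newregs,
	 * typically via a short inline-asm sequence. */
}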
diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index b3c5ecbec03c..00b0fc68d5cb 100644
--- a/include/asm-mips/mach-cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
@@ -67,34 +67,9 @@
67#define COBALT_BRD_ID_QUBE2 0x5 67#define COBALT_BRD_ID_QUBE2 0x5
68#define COBALT_BRD_ID_RAQ2 0x6 68#define COBALT_BRD_ID_RAQ2 0x6
69 69
70/*
71 * Galileo chipset access macros for the Cobalt. The base address for
72 * the GT64111 chip is 0x14000000
73 *
74 * Most of this really should go into a separate GT64111 header file.
75 */
76#define GT64111_IO_BASE 0x10000000UL
77#define GT64111_IO_END 0x11ffffffUL
78#define GT64111_MEM_BASE 0x12000000UL
79#define GT64111_MEM_END 0x13ffffffUL
80#define GT64111_BASE 0x14000000UL
81#define GALILEO_REG(ofs) CKSEG1ADDR(GT64111_BASE + (unsigned long)(ofs))
82
83#define GALILEO_INL(port) (*(volatile unsigned int *) GALILEO_REG(port))
84#define GALILEO_OUTL(val, port) \
85do { \
86 *(volatile unsigned int *) GALILEO_REG(port) = (val); \
87} while (0)
88
89#define GALILEO_INTR_T0EXP (1 << 8)
90#define GALILEO_INTR_RETRY_CTR (1 << 20)
91
92#define GALILEO_ENTC0 0x01
93#define GALILEO_SELTC0 0x02
94
95#define PCI_CFG_SET(devfn,where) \ 70#define PCI_CFG_SET(devfn,where) \
96 GALILEO_OUTL((0x80000000 | (PCI_SLOT (devfn) << 11) | \ 71 GT_WRITE(GT_PCI0_CFGADDR_OFS, (0x80000000 | (PCI_SLOT (devfn) << 11) | \
97 (PCI_FUNC (devfn) << 8) | (where)), GT_PCI0_CFGADDR_OFS) 72 (PCI_FUNC (devfn) << 8) | (where)))
98 73
99#define COBALT_LED_PORT (*(volatile unsigned char *) CKSEG1ADDR(0x1c000000)) 74#define COBALT_LED_PORT (*(volatile unsigned char *) CKSEG1ADDR(0x1c000000))
100# define COBALT_LED_BAR_LEFT (1 << 0) /* Qube */ 75# define COBALT_LED_BAR_LEFT (1 << 0) /* Qube */
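With the private Galileo macros gone, config cycles pair PCI_CFG_SET() with the generic accessors from <asm/gt64120.h>. A sketch of a dword config read under that assumption (the function name is illustrative):

#include <asm/gt64120.h>
#include <asm/mach-cobalt/cobalt.h>

static u32 cobalt_pci_read_config_dword(unsigned int devfn, int where)
{
	PCI_CFG_SET(devfn, where & ~3);		/* select device/register */
	return GT_READ(GT_PCI0_CFGDATA_OFS);	/* read the selected dword */
}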
diff --git a/include/asm-mips/mach-cobalt/mach-gt64120.h b/include/asm-mips/mach-cobalt/mach-gt64120.h
index 587fc4378f44..ae9c5523c7ef 100644
--- a/include/asm-mips/mach-cobalt/mach-gt64120.h
+++ b/include/asm-mips/mach-cobalt/mach-gt64120.h
@@ -1 +1,27 @@
1/* there's something here ... in the dark */ 1/*
2 * Copyright (C) 2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef _COBALT_MACH_GT64120_H
19#define _COBALT_MACH_GT64120_H
20
21/*
22 * Cobalt uses GT64111. GT64111 is almost the same as GT64120.
23 */
24
25#define GT64120_BASE CKSEG1ADDR(GT_DEF_BASE)
26
27#endif /* _COBALT_MACH_GT64120_H */
diff --git a/include/asm-mips/mach-ip27/irq.h b/include/asm-mips/mach-ip27/irq.h
index 806213ce31b6..25f0c3f39adf 100644
--- a/include/asm-mips/mach-ip27/irq.h
+++ b/include/asm-mips/mach-ip27/irq.h
@@ -10,8 +10,6 @@
10#ifndef __ASM_MACH_IP27_IRQ_H 10#ifndef __ASM_MACH_IP27_IRQ_H
11#define __ASM_MACH_IP27_IRQ_H 11#define __ASM_MACH_IP27_IRQ_H
12 12
13#include <asm/sn/arch.h>
14
15/* 13/*
16 * A hardwired interrupt number is completely stupid for this system - a 14
17 * large configuration might have thousands if not tens of thousands of 15
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h
index a13b715fd9ca..44790fdc5d00 100644
--- a/include/asm-mips/mach-ip27/topology.h
+++ b/include/asm-mips/mach-ip27/topology.h
@@ -1,7 +1,6 @@
1#ifndef _ASM_MACH_TOPOLOGY_H 1#ifndef _ASM_MACH_TOPOLOGY_H
2#define _ASM_MACH_TOPOLOGY_H 1 2#define _ASM_MACH_TOPOLOGY_H 1
3 3
4#include <asm/sn/arch.h>
5#include <asm/sn/hub.h> 4#include <asm/sn/hub.h>
6#include <asm/mmzone.h> 5#include <asm/mmzone.h>
7 6
diff --git a/include/asm-mips/mach-rm200/cpu-feature-overrides.h b/include/asm-mips/mach-rm/cpu-feature-overrides.h
index 11410ae10d36..11410ae10d36 100644
--- a/include/asm-mips/mach-rm200/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-rm/cpu-feature-overrides.h
diff --git a/include/asm-mips/mach-rm200/mc146818rtc.h b/include/asm-mips/mach-rm/mc146818rtc.h
index d37ae68dc6a3..d37ae68dc6a3 100644
--- a/include/asm-mips/mach-rm200/mc146818rtc.h
+++ b/include/asm-mips/mach-rm/mc146818rtc.h
diff --git a/include/asm-mips/mach-rm200/timex.h b/include/asm-mips/mach-rm/timex.h
index 11ff6cb0f214..11ff6cb0f214 100644
--- a/include/asm-mips/mach-rm200/timex.h
+++ b/include/asm-mips/mach-rm/timex.h
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index 1f318d707998..9985cb7c16e7 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -545,62 +545,6 @@
545#define MIPS_FPIR_L (_ULCAST_(1) << 21) 545#define MIPS_FPIR_L (_ULCAST_(1) << 21)
546#define MIPS_FPIR_F64 (_ULCAST_(1) << 22) 546#define MIPS_FPIR_F64 (_ULCAST_(1) << 22)
547 547
548/*
549 * R10000 performance counter definitions.
550 *
551 * FIXME: The R10000 performance counter opens a nice way to implement CPU
552 * time accounting with a precision of one cycle. I don't have
553 * R10000 silicon but just a manual, so ...
554 */
555
556/*
557 * Events counted by counter #0
558 */
559#define CE0_CYCLES 0
560#define CE0_INSN_ISSUED 1
561#define CE0_LPSC_ISSUED 2
562#define CE0_S_ISSUED 3
563#define CE0_SC_ISSUED 4
564#define CE0_SC_FAILED 5
565#define CE0_BRANCH_DECODED 6
566#define CE0_QW_WB_SECONDARY 7
567#define CE0_CORRECTED_ECC_ERRORS 8
568#define CE0_ICACHE_MISSES 9
569#define CE0_SCACHE_I_MISSES 10
570#define CE0_SCACHE_I_WAY_MISSPREDICTED 11
571#define CE0_EXT_INTERVENTIONS_REQ 12
572#define CE0_EXT_INVALIDATE_REQ 13
573#define CE0_VIRTUAL_COHERENCY_COND 14
574#define CE0_INSN_GRADUATED 15
575
576/*
577 * Events counted by counter #1
578 */
579#define CE1_CYCLES 0
580#define CE1_INSN_GRADUATED 1
581#define CE1_LPSC_GRADUATED 2
582#define CE1_S_GRADUATED 3
583#define CE1_SC_GRADUATED 4
584#define CE1_FP_INSN_GRADUATED 5
585#define CE1_QW_WB_PRIMARY 6
586#define CE1_TLB_REFILL 7
587#define CE1_BRANCH_MISSPREDICTED 8
588#define CE1_DCACHE_MISS 9
589#define CE1_SCACHE_D_MISSES 10
590#define CE1_SCACHE_D_WAY_MISSPREDICTED 11
591#define CE1_EXT_INTERVENTION_HITS 12
592#define CE1_EXT_INVALIDATE_REQ 13
593#define CE1_SP_HINT_TO_CEXCL_SC_BLOCKS 14
594#define CE1_SP_HINT_TO_SHARED_SC_BLOCKS 15
595
596/*
597 * These flags define in which privilege mode the counters count events
598 */
599#define CEB_USER 8 /* Count events in user mode, EXL = ERL = 0 */
600#define CEB_SUPERVISOR 4 /* Count events in supervisor mode EXL = ERL = 0 */
601#define CEB_KERNEL 2 /* Count events in kernel mode EXL = ERL = 0 */
602#define CEB_EXL 1 /* Count events with EXL = 1, ERL = 0 */
603
604#ifndef __ASSEMBLY__ 548#ifndef __ASSEMBLY__
605 549
606/* 550/*
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 85b258ee7090..2f9e1a9ec51f 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -34,7 +34,8 @@
34 34
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36 36
37#include <asm/cpu-features.h> 37#include <linux/pfn.h>
38#include <asm/io.h>
38 39
39extern void clear_page(void * page); 40extern void clear_page(void * page);
40extern void copy_page(void * to, void * from); 41extern void copy_page(void * to, void * from);
@@ -59,16 +60,13 @@ static inline void clear_user_page(void *addr, unsigned long vaddr,
59 flush_data_cache_page((unsigned long)addr); 60 flush_data_cache_page((unsigned long)addr);
60} 61}
61 62
62static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 63extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
63 struct page *to) 64 struct page *to);
64{ 65struct vm_area_struct;
65 extern void (*flush_data_cache_page)(unsigned long addr); 66extern void copy_user_highpage(struct page *to, struct page *from,
67 unsigned long vaddr, struct vm_area_struct *vma);
66 68
67 copy_page(vto, vfrom); 69#define __HAVE_ARCH_COPY_USER_HIGHPAGE
68 if (!cpu_has_ic_fills_f_dc ||
69 pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
70 flush_data_cache_page((unsigned long)vto);
71}
72 70
73/* 71/*
74 * These are used to make use of C type-checking.. 72 * These are used to make use of C type-checking..
@@ -134,8 +132,14 @@ typedef struct { unsigned long pgprot; } pgprot_t;
134/* to align the pointer to the (next) page boundary */ 132/* to align the pointer to the (next) page boundary */
135#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) 133#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
136 134
137#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 135#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64)
138#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 136#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0)
137#else
138#define __pa_page_offset(x) PAGE_OFFSET
139#endif
140#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x))
141#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
142#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
139 143
140#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 144#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
141 145
@@ -160,8 +164,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
160 164
161#endif 165#endif
162 166
163#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 167#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
164#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 168#define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr)))
165 169
166#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 170#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
167 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 171 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
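The new __pa_page_offset() matters on 64-bit non-ELF64 kernels, where the kernel image runs from CKSEG0 while kmalloc() memory lives in XKPHYS at PAGE_OFFSET; __pa() now subtracts the right bias per address. A small sketch of the round trips, assuming standard allocator calls:

#include <linux/slab.h>
#include <asm/page.h>

static void pa_demo(void)
{
	void *p = kmalloc(64, GFP_KERNEL);	  /* XKPHYS address */
	unsigned long pa  = __pa(p);		  /* PAGE_OFFSET bias */
	unsigned long sym = __pa_symbol(pa_demo); /* CKSEG0 bias on non-ELF64 */
	struct page *pg = virt_to_page(p);	  /* now via virt_to_phys() */

	kfree(p);
	(void)pa; (void)sym; (void)pg;
}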
diff --git a/include/asm-mips/pci.h b/include/asm-mips/pci.h
index c4d68bebdca6..7f0f120ca07c 100644
--- a/include/asm-mips/pci.h
+++ b/include/asm-mips/pci.h
@@ -187,4 +187,10 @@ static inline void pcibios_add_platform_entries(struct pci_dev *dev)
187/* Do platform specific device initialization at pci_enable_device() time */ 187/* Do platform specific device initialization at pci_enable_device() time */
188extern int pcibios_plat_dev_init(struct pci_dev *dev); 188extern int pcibios_plat_dev_init(struct pci_dev *dev);
189 189
190/* Chances are this interrupt is wired PC-style ... */
191static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
192{
193 return channel ? 15 : 14;
194}
195
190#endif /* _ASM_PCI_H */ 196#endif /* _ASM_PCI_H */
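The new helper encodes the PC-legacy IDE wiring in one place; a minimal usage sketch:

static void ide_pick_legacy_irqs(struct pci_dev *dev, int *pri, int *sec)
{
	*pri = pci_get_legacy_ide_irq(dev, 0);	/* primary channel -> 14 */
	*sec = pci_get_legacy_ide_irq(dev, 1);	/* secondary channel -> 15 */
}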
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index d20f2e9b28be..2fbd47eba32d 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -156,9 +156,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
156#define __pte_offset(address) \ 156#define __pte_offset(address) \
157 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 157 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
158#define pte_offset(dir, address) \ 158#define pte_offset(dir, address) \
159 ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) 159 ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
160#define pte_offset_kernel(dir, address) \ 160#define pte_offset_kernel(dir, address) \
161 ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) 161 ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
162 162
163#define pte_offset_map(dir, address) \ 163#define pte_offset_map(dir, address) \
164 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) 164 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index 7e7320300aa3..a5b18710b6a4 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -14,6 +14,7 @@
14#include <asm/addrspace.h> 14#include <asm/addrspace.h>
15#include <asm/page.h> 15#include <asm/page.h>
16#include <asm/cachectl.h> 16#include <asm/cachectl.h>
17#include <asm/fixmap.h>
17 18
18#include <asm-generic/pgtable-nopud.h> 19#include <asm-generic/pgtable-nopud.h>
19 20
@@ -103,6 +104,13 @@
103#define VMALLOC_START MAP_BASE 104#define VMALLOC_START MAP_BASE
104#define VMALLOC_END \ 105#define VMALLOC_END \
105 (VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE) 106 (VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE)
107#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) && \
108 VMALLOC_START != CKSSEG
109/* Load modules into 32bit-compatible segment. */
110#define MODULE_START CKSSEG
111#define MODULE_END (FIXADDR_START-2*PAGE_SIZE)
112extern pgd_t module_pg_dir[PTRS_PER_PGD];
113#endif
106 114
107#define pte_ERROR(e) \ 115#define pte_ERROR(e) \
108 printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) 116 printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -174,7 +182,12 @@ static inline void pud_clear(pud_t *pudp)
174#define __pmd_offset(address) pmd_index(address) 182#define __pmd_offset(address) pmd_index(address)
175 183
176/* to find an entry in a kernel page-table-directory */ 184/* to find an entry in a kernel page-table-directory */
185#ifdef MODULE_START
186#define pgd_offset_k(address) \
187 ((address) >= MODULE_START ? module_pg_dir : pgd_offset(&init_mm, 0UL))
188#else
177#define pgd_offset_k(address) pgd_offset(&init_mm, 0UL) 189#define pgd_offset_k(address) pgd_offset(&init_mm, 0UL)
190#endif
178 191
179#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 192#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
180#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 193#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
@@ -199,9 +212,9 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
199#define __pte_offset(address) \ 212#define __pte_offset(address) \
200 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 213 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
201#define pte_offset(dir, address) \ 214#define pte_offset(dir, address) \
202 ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) 215 ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
203#define pte_offset_kernel(dir, address) \ 216#define pte_offset_kernel(dir, address) \
204 ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) 217 ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
205#define pte_offset_map(dir, address) \ 218#define pte_offset_map(dir, address) \
206 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) 219 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
207#define pte_offset_map_nested(dir, address) \ 220#define pte_offset_map_nested(dir, address) \
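When MODULE_START is defined, module mappings live in CKSSEG under the separate module_pg_dir, and pgd_offset_k() selects the directory by address. A sketch of a kernel-address page-table walk that relies on that selection:

static pte_t *kvaddr_to_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);  /* module_pg_dir above MODULE_START */
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}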
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 1ca4d1e185c7..f2e1325fec6c 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -67,7 +67,7 @@ extern unsigned long empty_zero_page;
67extern unsigned long zero_page_mask; 67extern unsigned long zero_page_mask;
68 68
69#define ZERO_PAGE(vaddr) \ 69#define ZERO_PAGE(vaddr) \
70 (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) 70 (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
71 71
72#define __HAVE_ARCH_MOVE_PTE 72#define __HAVE_ARCH_MOVE_PTE
73#define move_pte(pte, prot, old_addr, new_addr) \ 73#define move_pte(pte, prot, old_addr, new_addr) \
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h
index 5f3a9075cd28..8a1f2b6f04ac 100644
--- a/include/asm-mips/ptrace.h
+++ b/include/asm-mips/ptrace.h
@@ -80,10 +80,16 @@ struct pt_regs {
80#define instruction_pointer(regs) ((regs)->cp0_epc) 80#define instruction_pointer(regs) ((regs)->cp0_epc)
81#define profile_pc(regs) instruction_pointer(regs) 81#define profile_pc(regs) instruction_pointer(regs)
82 82
83extern void show_regs(struct pt_regs *);
84
85extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); 83extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit);
86 84
85extern NORET_TYPE void die(const char *, struct pt_regs *);
86
87static inline void die_if_kernel(const char *str, struct pt_regs *regs)
88{
89 if (unlikely(!user_mode(regs)))
90 die(str, regs);
91}
92
87#endif 93#endif
88 94
89#endif /* _ASM_PTRACE_H */ 95#endif /* _ASM_PTRACE_H */
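Moving die()/die_if_kernel() here lets trap handlers use them with only <asm/ptrace.h>. A sketch of the usual pattern in a fault path; the handler name is illustrative and force_sig() follows its 2.6-era two-argument form:

#include <linux/sched.h>
#include <asm/ptrace.h>

asmlinkage void do_bad_access(struct pt_regs *regs)
{
	/* Oops and kill the kernel if the fault was in kernel mode ... */
	die_if_kernel("bad access in kernel mode", regs);

	/* ... otherwise punish only the offending user task. */
	force_sig(SIGBUS, current);
}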
diff --git a/include/asm-mips/setup.h b/include/asm-mips/setup.h
index 737fa4a6912e..70009a902639 100644
--- a/include/asm-mips/setup.h
+++ b/include/asm-mips/setup.h
@@ -1,8 +1,6 @@
1#ifdef __KERNEL__
2#ifndef _MIPS_SETUP_H 1#ifndef _MIPS_SETUP_H
3#define _MIPS_SETUP_H 2#define _MIPS_SETUP_H
4 3
5#define COMMAND_LINE_SIZE 256 4#define COMMAND_LINE_SIZE 256
6 5
7#endif /* __SETUP_H */ 6#endif /* __SETUP_H */
8#endif /* __KERNEL__ */
diff --git a/include/asm-mips/sn/arch.h b/include/asm-mips/sn/arch.h
index 51174af6ac52..da523de628be 100644
--- a/include/asm-mips/sn/arch.h
+++ b/include/asm-mips/sn/arch.h
@@ -18,7 +18,6 @@
18#endif 18#endif
19 19
20typedef u64 hubreg_t; 20typedef u64 hubreg_t;
21typedef u64 nic_t;
22 21
23#define cputonasid(cpu) (cpu_data[(cpu)].p_nasid) 22#define cputonasid(cpu) (cpu_data[(cpu)].p_nasid)
24#define cputoslice(cpu) (cpu_data[(cpu)].p_slice) 23#define cputoslice(cpu) (cpu_data[(cpu)].p_slice)
diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h
index b63cd0655b3d..82aeb9e322db 100644
--- a/include/asm-mips/sn/klconfig.h
+++ b/include/asm-mips/sn/klconfig.h
@@ -61,6 +61,8 @@
61#endif /* CONFIG_SGI_IP35 */ 61#endif /* CONFIG_SGI_IP35 */
62#endif /* CONFIG_SGI_IP27 || CONFIG_SGI_IP35 */ 62#endif /* CONFIG_SGI_IP27 || CONFIG_SGI_IP35 */
63 63
64typedef u64 nic_t;
65
64#define KLCFGINFO_MAGIC 0xbeedbabe 66#define KLCFGINFO_MAGIC 0xbeedbabe
65 67
66typedef s32 klconf_off_t; 68typedef s32 klconf_off_t;
@@ -176,7 +178,7 @@ typedef struct kl_config_hdr {
176/* --- New Macros for the changed kl_config_hdr_t structure --- */ 178/* --- New Macros for the changed kl_config_hdr_t structure --- */
177 179
178#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\ 180#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\
179 (unsigned long)_k + (_k->ch_malloc_hdr_off))) 181 ((unsigned long)_k + (_k->ch_malloc_hdr_off)))
180 182
181#define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n)) 183#define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
182 184
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index c8d5587467bb..fc3217fc1118 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -3,12 +3,13 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1999, 2000 by Ralf Baechle 6 * Copyright (C) 1999, 2000, 06 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */ 8 */
9#ifndef _ASM_SPINLOCK_H 9#ifndef _ASM_SPINLOCK_H
10#define _ASM_SPINLOCK_H 10#define _ASM_SPINLOCK_H
11 11
12#include <asm/barrier.h>
12#include <asm/war.h> 13#include <asm/war.h>
13 14
14/* 15/*
@@ -40,7 +41,6 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
40 " sc %1, %0 \n" 41 " sc %1, %0 \n"
41 " beqzl %1, 1b \n" 42 " beqzl %1, 1b \n"
42 " nop \n" 43 " nop \n"
43 " sync \n"
44 " .set reorder \n" 44 " .set reorder \n"
45 : "=m" (lock->lock), "=&r" (tmp) 45 : "=m" (lock->lock), "=&r" (tmp)
46 : "m" (lock->lock) 46 : "m" (lock->lock)
@@ -53,19 +53,22 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
53 " li %1, 1 \n" 53 " li %1, 1 \n"
54 " sc %1, %0 \n" 54 " sc %1, %0 \n"
55 " beqz %1, 1b \n" 55 " beqz %1, 1b \n"
56 " sync \n" 56 " nop \n"
57 " .set reorder \n" 57 " .set reorder \n"
58 : "=m" (lock->lock), "=&r" (tmp) 58 : "=m" (lock->lock), "=&r" (tmp)
59 : "m" (lock->lock) 59 : "m" (lock->lock)
60 : "memory"); 60 : "memory");
61 } 61 }
62
63 smp_mb();
62} 64}
63 65
64static inline void __raw_spin_unlock(raw_spinlock_t *lock) 66static inline void __raw_spin_unlock(raw_spinlock_t *lock)
65{ 67{
68 smp_mb();
69
66 __asm__ __volatile__( 70 __asm__ __volatile__(
67 " .set noreorder # __raw_spin_unlock \n" 71 " .set noreorder # __raw_spin_unlock \n"
68 " sync \n"
69 " sw $0, %0 \n" 72 " sw $0, %0 \n"
70 " .set\treorder \n" 73 " .set\treorder \n"
71 : "=m" (lock->lock) 74 : "=m" (lock->lock)
@@ -86,7 +89,6 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
86 " beqzl %2, 1b \n" 89 " beqzl %2, 1b \n"
87 " nop \n" 90 " nop \n"
88 " andi %2, %0, 1 \n" 91 " andi %2, %0, 1 \n"
89 " sync \n"
90 " .set reorder" 92 " .set reorder"
91 : "=&r" (temp), "=m" (lock->lock), "=&r" (res) 93 : "=&r" (temp), "=m" (lock->lock), "=&r" (res)
92 : "m" (lock->lock) 94 : "m" (lock->lock)
@@ -99,13 +101,14 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
99 " sc %2, %1 \n" 101 " sc %2, %1 \n"
100 " beqz %2, 1b \n" 102 " beqz %2, 1b \n"
101 " andi %2, %0, 1 \n" 103 " andi %2, %0, 1 \n"
102 " sync \n"
103 " .set reorder" 104 " .set reorder"
104 : "=&r" (temp), "=m" (lock->lock), "=&r" (res) 105 : "=&r" (temp), "=m" (lock->lock), "=&r" (res)
105 : "m" (lock->lock) 106 : "m" (lock->lock)
106 : "memory"); 107 : "memory");
107 } 108 }
108 109
110 smp_mb();
111
109 return res == 0; 112 return res == 0;
110} 113}
111 114
@@ -143,7 +146,6 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
143 " sc %1, %0 \n" 146 " sc %1, %0 \n"
144 " beqzl %1, 1b \n" 147 " beqzl %1, 1b \n"
145 " nop \n" 148 " nop \n"
146 " sync \n"
147 " .set reorder \n" 149 " .set reorder \n"
148 : "=m" (rw->lock), "=&r" (tmp) 150 : "=m" (rw->lock), "=&r" (tmp)
149 : "m" (rw->lock) 151 : "m" (rw->lock)
@@ -156,12 +158,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
156 " addu %1, 1 \n" 158 " addu %1, 1 \n"
157 " sc %1, %0 \n" 159 " sc %1, %0 \n"
158 " beqz %1, 1b \n" 160 " beqz %1, 1b \n"
159 " sync \n" 161 " nop \n"
160 " .set reorder \n" 162 " .set reorder \n"
161 : "=m" (rw->lock), "=&r" (tmp) 163 : "=m" (rw->lock), "=&r" (tmp)
162 : "m" (rw->lock) 164 : "m" (rw->lock)
163 : "memory"); 165 : "memory");
164 } 166 }
167
168 smp_mb();
165} 169}
166 170
167/* Note the use of sub, not subu which will make the kernel die with an 171/* Note the use of sub, not subu which will make the kernel die with an
@@ -171,13 +175,14 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
171{ 175{
172 unsigned int tmp; 176 unsigned int tmp;
173 177
178 smp_mb();
179
174 if (R10000_LLSC_WAR) { 180 if (R10000_LLSC_WAR) {
175 __asm__ __volatile__( 181 __asm__ __volatile__(
176 "1: ll %1, %2 # __raw_read_unlock \n" 182 "1: ll %1, %2 # __raw_read_unlock \n"
177 " sub %1, 1 \n" 183 " sub %1, 1 \n"
178 " sc %1, %0 \n" 184 " sc %1, %0 \n"
179 " beqzl %1, 1b \n" 185 " beqzl %1, 1b \n"
180 " sync \n"
181 : "=m" (rw->lock), "=&r" (tmp) 186 : "=m" (rw->lock), "=&r" (tmp)
182 : "m" (rw->lock) 187 : "m" (rw->lock)
183 : "memory"); 188 : "memory");
@@ -188,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
188 " sub %1, 1 \n" 193 " sub %1, 1 \n"
189 " sc %1, %0 \n" 194 " sc %1, %0 \n"
190 " beqz %1, 1b \n" 195 " beqz %1, 1b \n"
191 " sync \n" 196 " nop \n"
192 " .set reorder \n" 197 " .set reorder \n"
193 : "=m" (rw->lock), "=&r" (tmp) 198 : "=m" (rw->lock), "=&r" (tmp)
194 : "m" (rw->lock) 199 : "m" (rw->lock)
@@ -208,7 +213,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
208 " lui %1, 0x8000 \n" 213 " lui %1, 0x8000 \n"
209 " sc %1, %0 \n" 214 " sc %1, %0 \n"
210 " beqzl %1, 1b \n" 215 " beqzl %1, 1b \n"
211 " sync \n" 216 " nop \n"
212 " .set reorder \n" 217 " .set reorder \n"
213 : "=m" (rw->lock), "=&r" (tmp) 218 : "=m" (rw->lock), "=&r" (tmp)
214 : "m" (rw->lock) 219 : "m" (rw->lock)
@@ -221,18 +226,22 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
221 " lui %1, 0x8000 \n" 226 " lui %1, 0x8000 \n"
222 " sc %1, %0 \n" 227 " sc %1, %0 \n"
223 " beqz %1, 1b \n" 228 " beqz %1, 1b \n"
224 " sync \n" 229 " nop \n"
225 " .set reorder \n" 230 " .set reorder \n"
226 : "=m" (rw->lock), "=&r" (tmp) 231 : "=m" (rw->lock), "=&r" (tmp)
227 : "m" (rw->lock) 232 : "m" (rw->lock)
228 : "memory"); 233 : "memory");
229 } 234 }
235
236 smp_mb();
230} 237}
231 238
232static inline void __raw_write_unlock(raw_rwlock_t *rw) 239static inline void __raw_write_unlock(raw_rwlock_t *rw)
233{ 240{
241 smp_mb();
242
234 __asm__ __volatile__( 243 __asm__ __volatile__(
235 " sync # __raw_write_unlock \n" 244 " # __raw_write_unlock \n"
236 " sw $0, %0 \n" 245 " sw $0, %0 \n"
237 : "=m" (rw->lock) 246 : "=m" (rw->lock)
238 : "m" (rw->lock) 247 : "m" (rw->lock)
@@ -252,11 +261,10 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
252 " bnez %1, 2f \n" 261 " bnez %1, 2f \n"
253 " addu %1, 1 \n" 262 " addu %1, 1 \n"
254 " sc %1, %0 \n" 263 " sc %1, %0 \n"
255 " beqzl %1, 1b \n"
256 " .set reorder \n" 264 " .set reorder \n"
257#ifdef CONFIG_SMP 265 " beqzl %1, 1b \n"
258 " sync \n" 266 " nop \n"
259#endif 267 __WEAK_ORDERING_MB
260 " li %2, 1 \n" 268 " li %2, 1 \n"
261 "2: \n" 269 "2: \n"
262 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) 270 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -271,10 +279,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
271 " addu %1, 1 \n" 279 " addu %1, 1 \n"
272 " sc %1, %0 \n" 280 " sc %1, %0 \n"
273 " beqz %1, 1b \n" 281 " beqz %1, 1b \n"
282 " nop \n"
274 " .set reorder \n" 283 " .set reorder \n"
275#ifdef CONFIG_SMP 284 __WEAK_ORDERING_MB
276 " sync \n"
277#endif
278 " li %2, 1 \n" 285 " li %2, 1 \n"
279 "2: \n" 286 "2: \n"
280 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) 287 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -299,7 +306,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
299 " lui %1, 0x8000 \n" 306 " lui %1, 0x8000 \n"
300 " sc %1, %0 \n" 307 " sc %1, %0 \n"
301 " beqzl %1, 1b \n" 308 " beqzl %1, 1b \n"
302 " sync \n" 309 " nop \n"
310 __WEAK_ORDERING_MB
303 " li %2, 1 \n" 311 " li %2, 1 \n"
304 " .set reorder \n" 312 " .set reorder \n"
305 "2: \n" 313 "2: \n"
@@ -315,7 +323,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
315 " lui %1, 0x8000 \n" 323 " lui %1, 0x8000 \n"
316 " sc %1, %0 \n" 324 " sc %1, %0 \n"
317 " beqz %1, 1b \n" 325 " beqz %1, 1b \n"
318 " sync \n" 326 " nop \n"
327 __WEAK_ORDERING_MB
319 " li %2, 1 \n" 328 " li %2, 1 \n"
320 " .set reorder \n" 329 " .set reorder \n"
321 "2: \n" 330 "2: \n"
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 3056feed5a36..5e1289c85ed9 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle 6 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
7 * Copyright (C) 1996 by Paul M. Antoine 7 * Copyright (C) 1996 by Paul M. Antoine
8 * Copyright (C) 1999 Silicon Graphics 8 * Copyright (C) 1999 Silicon Graphics
9 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com 9 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
@@ -16,132 +16,11 @@
16#include <linux/irqflags.h> 16#include <linux/irqflags.h>
17 17
18#include <asm/addrspace.h> 18#include <asm/addrspace.h>
19#include <asm/barrier.h>
19#include <asm/cpu-features.h> 20#include <asm/cpu-features.h>
20#include <asm/dsp.h> 21#include <asm/dsp.h>
21#include <asm/ptrace.h>
22#include <asm/war.h> 22#include <asm/war.h>
23 23
24/*
25 * read_barrier_depends - Flush all pending reads that subsequent reads
26 * depend on.
27 *
28 * No data-dependent reads from memory-like regions are ever reordered
29 * over this barrier. All reads preceding this primitive are guaranteed
30 * to access memory (but not necessarily other CPUs' caches) before any
31 * reads following this primitive that depend on the data returned by
32 * any of the preceding reads. This primitive is much lighter weight than
33 * rmb() on most CPUs, and is never heavier weight than is
34 * rmb().
35 *
36 * These ordering constraints are respected by both the local CPU
37 * and the compiler.
38 *
39 * Ordering is not guaranteed by anything other than these primitives,
40 * not even by data dependencies. See the documentation for
41 * memory_barrier() for examples and URLs to more information.
42 *
43 * For example, the following code would force ordering (the initial
44 * value of "a" is zero, "b" is one, and "p" is "&a"):
45 *
46 * <programlisting>
47 * CPU 0 CPU 1
48 *
49 * b = 2;
50 * memory_barrier();
51 * p = &b; q = p;
52 * read_barrier_depends();
53 * d = *q;
54 * </programlisting>
55 *
56 * because the read of "*q" depends on the read of "p" and these
57 * two reads are separated by a read_barrier_depends(). However,
58 * the following code, with the same initial values for "a" and "b":
59 *
60 * <programlisting>
61 * CPU 0 CPU 1
62 *
63 * a = 2;
64 * memory_barrier();
65 * b = 3; y = b;
66 * read_barrier_depends();
67 * x = a;
68 * </programlisting>
69 *
70 * does not enforce ordering, since there is no data dependency between
71 * the read of "a" and the read of "b". Therefore, on some CPUs, such
72 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
73 * in cases like this where there are no data dependencies.
74 */
75
76#define read_barrier_depends() do { } while(0)
77
78#ifdef CONFIG_CPU_HAS_SYNC
79#define __sync() \
80 __asm__ __volatile__( \
81 ".set push\n\t" \
82 ".set noreorder\n\t" \
83 ".set mips2\n\t" \
84 "sync\n\t" \
85 ".set pop" \
86 : /* no output */ \
87 : /* no input */ \
88 : "memory")
89#else
90#define __sync() do { } while(0)
91#endif
92
93#define __fast_iob() \
94 __asm__ __volatile__( \
95 ".set push\n\t" \
96 ".set noreorder\n\t" \
97 "lw $0,%0\n\t" \
98 "nop\n\t" \
99 ".set pop" \
100 : /* no output */ \
101 : "m" (*(int *)CKSEG1) \
102 : "memory")
103
104#define fast_wmb() __sync()
105#define fast_rmb() __sync()
106#define fast_mb() __sync()
107#define fast_iob() \
108 do { \
109 __sync(); \
110 __fast_iob(); \
111 } while (0)
112
113#ifdef CONFIG_CPU_HAS_WB
114
115#include <asm/wbflush.h>
116
117#define wmb() fast_wmb()
118#define rmb() fast_rmb()
119#define mb() wbflush()
120#define iob() wbflush()
121
122#else /* !CONFIG_CPU_HAS_WB */
123
124#define wmb() fast_wmb()
125#define rmb() fast_rmb()
126#define mb() fast_mb()
127#define iob() fast_iob()
128
129#endif /* !CONFIG_CPU_HAS_WB */
130
131#ifdef CONFIG_SMP
132#define smp_mb() mb()
133#define smp_rmb() rmb()
134#define smp_wmb() wmb()
135#define smp_read_barrier_depends() read_barrier_depends()
136#else
137#define smp_mb() barrier()
138#define smp_rmb() barrier()
139#define smp_wmb() barrier()
140#define smp_read_barrier_depends() do { } while(0)
141#endif
142
143#define set_mb(var, value) \
144do { var = value; mb(); } while (0)
145 24
146/* 25/*
147 * switch_to(n) should switch tasks to task nr n, first 26 * switch_to(n) should switch tasks to task nr n, first
@@ -217,9 +96,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
217 " .set mips3 \n" 96 " .set mips3 \n"
218 " sc %2, %1 \n" 97 " sc %2, %1 \n"
219 " beqzl %2, 1b \n" 98 " beqzl %2, 1b \n"
220#ifdef CONFIG_SMP
221 " sync \n"
222#endif
223 " .set mips0 \n" 99 " .set mips0 \n"
224 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 100 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
225 : "R" (*m), "Jr" (val) 101 : "R" (*m), "Jr" (val)
@@ -235,9 +111,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
235 " .set mips3 \n" 111 " .set mips3 \n"
236 " sc %2, %1 \n" 112 " sc %2, %1 \n"
237 " beqz %2, 1b \n" 113 " beqz %2, 1b \n"
238#ifdef CONFIG_SMP
239 " sync \n"
240#endif
241 " .set mips0 \n" 114 " .set mips0 \n"
242 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 115 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
243 : "R" (*m), "Jr" (val) 116 : "R" (*m), "Jr" (val)
@@ -251,6 +124,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
251 local_irq_restore(flags); /* implies memory barrier */ 124 local_irq_restore(flags); /* implies memory barrier */
252 } 125 }
253 126
127 smp_mb();
128
254 return retval; 129 return retval;
255} 130}
256 131
@@ -268,9 +143,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
268 " move %2, %z4 \n" 143 " move %2, %z4 \n"
269 " scd %2, %1 \n" 144 " scd %2, %1 \n"
270 " beqzl %2, 1b \n" 145 " beqzl %2, 1b \n"
271#ifdef CONFIG_SMP
272 " sync \n"
273#endif
274 " .set mips0 \n" 146 " .set mips0 \n"
275 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 147 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
276 : "R" (*m), "Jr" (val) 148 : "R" (*m), "Jr" (val)
@@ -284,9 +156,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
284 " move %2, %z4 \n" 156 " move %2, %z4 \n"
285 " scd %2, %1 \n" 157 " scd %2, %1 \n"
286 " beqz %2, 1b \n" 158 " beqz %2, 1b \n"
287#ifdef CONFIG_SMP
288 " sync \n"
289#endif
290 " .set mips0 \n" 159 " .set mips0 \n"
291 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 160 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
292 : "R" (*m), "Jr" (val) 161 : "R" (*m), "Jr" (val)
@@ -300,6 +169,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
300 local_irq_restore(flags); /* implies memory barrier */ 169 local_irq_restore(flags); /* implies memory barrier */
301 } 170 }
302 171
172 smp_mb();
173
303 return retval; 174 return retval;
304} 175}
305#else 176#else
@@ -345,9 +216,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
345 " .set mips3 \n" 216 " .set mips3 \n"
346 " sc $1, %1 \n" 217 " sc $1, %1 \n"
347 " beqzl $1, 1b \n" 218 " beqzl $1, 1b \n"
348#ifdef CONFIG_SMP
349 " sync \n"
350#endif
351 "2: \n" 219 "2: \n"
352 " .set pop \n" 220 " .set pop \n"
353 : "=&r" (retval), "=R" (*m) 221 : "=&r" (retval), "=R" (*m)
@@ -365,9 +233,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
365 " .set mips3 \n" 233 " .set mips3 \n"
366 " sc $1, %1 \n" 234 " sc $1, %1 \n"
367 " beqz $1, 1b \n" 235 " beqz $1, 1b \n"
368#ifdef CONFIG_SMP
369 " sync \n"
370#endif
371 "2: \n" 236 "2: \n"
372 " .set pop \n" 237 " .set pop \n"
373 : "=&r" (retval), "=R" (*m) 238 : "=&r" (retval), "=R" (*m)
@@ -383,6 +248,8 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
383 local_irq_restore(flags); /* implies memory barrier */ 248 local_irq_restore(flags); /* implies memory barrier */
384 } 249 }
385 250
251 smp_mb();
252
386 return retval; 253 return retval;
387} 254}
388 255
@@ -402,9 +269,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
402 " move $1, %z4 \n" 269 " move $1, %z4 \n"
403 " scd $1, %1 \n" 270 " scd $1, %1 \n"
404 " beqzl $1, 1b \n" 271 " beqzl $1, 1b \n"
405#ifdef CONFIG_SMP
406 " sync \n"
407#endif
408 "2: \n" 272 "2: \n"
409 " .set pop \n" 273 " .set pop \n"
410 : "=&r" (retval), "=R" (*m) 274 : "=&r" (retval), "=R" (*m)
@@ -420,9 +284,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
420 " move $1, %z4 \n" 284 " move $1, %z4 \n"
421 " scd $1, %1 \n" 285 " scd $1, %1 \n"
422 " beqz $1, 1b \n" 286 " beqz $1, 1b \n"
423#ifdef CONFIG_SMP
424 " sync \n"
425#endif
426 "2: \n" 287 "2: \n"
427 " .set pop \n" 288 " .set pop \n"
428 : "=&r" (retval), "=R" (*m) 289 : "=&r" (retval), "=R" (*m)
@@ -438,6 +299,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
438 local_irq_restore(flags); /* implies memory barrier */ 299 local_irq_restore(flags); /* implies memory barrier */
439 } 300 }
440 301
302 smp_mb();
303
441 return retval; 304 return retval;
442} 305}
443#else 306#else
@@ -472,14 +335,6 @@ extern void *set_except_vector(int n, void *addr);
472extern unsigned long ebase; 335extern unsigned long ebase;
473extern void per_cpu_trap_init(void); 336extern void per_cpu_trap_init(void);
474 337
475extern NORET_TYPE void die(const char *, struct pt_regs *);
476
477static inline void die_if_kernel(const char *str, struct pt_regs *regs)
478{
479 if (unlikely(!user_mode(regs)))
480 die(str, regs);
481}
482
483extern int stop_a_enabled; 338extern int stop_a_enabled;
484 339
485/* 340/*
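xchg()/cmpxchg() get the same treatment: the conditional sync inside the loop becomes one trailing smp_mb(), preserving the full-barrier semantics callers rely on. A sketch of such a caller, using the standard cmpxchg() API:

/* Lock-free increment; the smp_mb() on the cmpxchg success path
 * makes each completed update a full memory barrier. */
static void lockfree_inc(volatile unsigned long *v)
{
	unsigned long old;

	do {
		old = *v;
	} while (cmpxchg(v, old, old + 1) != old);
}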
diff --git a/include/asm-mips/termbits.h b/include/asm-mips/termbits.h
index b62ec7c521cc..0bbe07b42a07 100644
--- a/include/asm-mips/termbits.h
+++ b/include/asm-mips/termbits.h
@@ -30,6 +30,17 @@ struct termios {
30 cc_t c_cc[NCCS]; /* control characters */ 30 cc_t c_cc[NCCS]; /* control characters */
31}; 31};
32 32
33struct ktermios {
34 tcflag_t c_iflag; /* input mode flags */
35 tcflag_t c_oflag; /* output mode flags */
36 tcflag_t c_cflag; /* control mode flags */
37 tcflag_t c_lflag; /* local mode flags */
38 cc_t c_line; /* line discipline */
39 cc_t c_cc[NCCS]; /* control characters */
40 speed_t c_ispeed; /* input speed */
41 speed_t c_ospeed; /* output speed */
42};
43
33/* c_cc characters */ 44/* c_cc characters */
34#define VINTR 0 /* Interrupt character [ISIG]. */ 45#define VINTR 0 /* Interrupt character [ISIG]. */
35#define VQUIT 1 /* Quit character [ISIG]. */ 46#define VQUIT 1 /* Quit character [ISIG]. */
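struct ktermios is the kernel-side superset of struct termios, carrying exact c_ispeed/c_ospeed values so drivers no longer have to decode CBAUD bits. A sketch of a driver-side consumer (names illustrative):

static void demo_program_uart(struct ktermios *kt)
{
	speed_t in  = kt->c_ispeed;	/* exact requested input speed */
	speed_t out = kt->c_ospeed;	/* exact requested output speed */

	/* compute and write the UART divisors from in/out here */
	(void)in; (void)out;
}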
diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h
index 625acd337bc3..a632cef830a2 100644
--- a/include/asm-mips/time.h
+++ b/include/asm-mips/time.h
@@ -21,6 +21,7 @@
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/rtc.h> 22#include <linux/rtc.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/clocksource.h>
24 25
25extern spinlock_t rtc_lock; 26extern spinlock_t rtc_lock;
26 27
@@ -44,12 +45,10 @@ extern int (*mips_timer_state)(void);
44extern void (*mips_timer_ack)(void); 45extern void (*mips_timer_ack)(void);
45 46
46/* 47/*
47 * High precision timer functions. 48 * High precision timer clocksource.
48 * If mips_hpt_read is NULL, an R4k-compatible timer setup is attempted. 49 * If .read is NULL, an R4k-compatible timer setup is attempted.
49 */ 50 */
50extern unsigned int (*mips_hpt_read)(void); 51extern struct clocksource clocksource_mips;
51extern void (*mips_hpt_init)(void);
52extern unsigned int mips_hpt_mask;
53 52
54/* 53/*
55 * to_tm() converts system time back to (year, mon, day, hour, min, sec). 54 * to_tm() converts system time back to (year, mon, day, hour, min, sec).
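Boards that used to hook mips_hpt_read/mips_hpt_init now patch the shared clocksource before registration. A hedged sketch of such an override; the counter register and function names are hypothetical, and cycle_t comes from <linux/clocksource.h>:

#include <linux/clocksource.h>
#include <linux/init.h>
#include <asm/time.h>

#define BOARD_COUNTER_REG 0xbc000200UL	/* hypothetical free-running counter */

static cycle_t board_hpt_read(void)
{
	return *(volatile u32 *)BOARD_COUNTER_REG;
}

void __init board_time_init(void)
{
	clocksource_mips.read = board_hpt_read;
}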
diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h
index 2b52e180c6f2..63a13c5bd832 100644
--- a/include/asm-mips/types.h
+++ b/include/asm-mips/types.h
@@ -93,16 +93,6 @@ typedef unsigned long long phys_t;
93typedef unsigned long phys_t; 93typedef unsigned long phys_t;
94#endif 94#endif
95 95
96#ifdef CONFIG_LBD
97typedef u64 sector_t;
98#define HAVE_SECTOR_T
99#endif
100
101#ifdef CONFIG_LSF
102typedef u64 blkcnt_t;
103#define HAVE_BLKCNT_T
104#endif
105
106#endif /* __ASSEMBLY__ */ 96#endif /* __ASSEMBLY__ */
107 97
108#endif /* __KERNEL__ */ 98#endif /* __KERNEL__ */
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index ec56aa52f669..696cff39a1d3 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -933,268 +933,6 @@
933 933
934#ifndef __ASSEMBLY__ 934#ifndef __ASSEMBLY__
935 935
936/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
937#define _syscall0(type,name) \
938type name(void) \
939{ \
940 register unsigned long __a3 asm("$7"); \
941 unsigned long __v0; \
942 \
943 __asm__ volatile ( \
944 ".set\tnoreorder\n\t" \
945 "li\t$2, %2\t\t\t# " #name "\n\t" \
946 "syscall\n\t" \
947 "move\t%0, $2\n\t" \
948 ".set\treorder" \
949 : "=&r" (__v0), "=r" (__a3) \
950 : "i" (__NR_##name) \
951 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
952 "memory"); \
953 \
954 if (__a3 == 0) \
955 return (type) __v0; \
956 errno = __v0; \
957 return (type) -1; \
958}
959
960/*
961 * DANGER: This macro isn't usable for the pipe(2) call
962 * which has an unusual return convention.
963 */
964#define _syscall1(type,name,atype,a) \
965type name(atype a) \
966{ \
967 register unsigned long __a0 asm("$4") = (unsigned long) a; \
968 register unsigned long __a3 asm("$7"); \
969 unsigned long __v0; \
970 \
971 __asm__ volatile ( \
972 ".set\tnoreorder\n\t" \
973 "li\t$2, %3\t\t\t# " #name "\n\t" \
974 "syscall\n\t" \
975 "move\t%0, $2\n\t" \
976 ".set\treorder" \
977 : "=&r" (__v0), "=r" (__a3) \
978 : "r" (__a0), "i" (__NR_##name) \
979 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
980 "memory"); \
981 \
982 if (__a3 == 0) \
983 return (type) __v0; \
984 errno = __v0; \
985 return (type) -1; \
986}
987
988#define _syscall2(type,name,atype,a,btype,b) \
989type name(atype a, btype b) \
990{ \
991 register unsigned long __a0 asm("$4") = (unsigned long) a; \
992 register unsigned long __a1 asm("$5") = (unsigned long) b; \
993 register unsigned long __a3 asm("$7"); \
994 unsigned long __v0; \
995 \
996 __asm__ volatile ( \
997 ".set\tnoreorder\n\t" \
998 "li\t$2, %4\t\t\t# " #name "\n\t" \
999 "syscall\n\t" \
1000 "move\t%0, $2\n\t" \
1001 ".set\treorder" \
1002 : "=&r" (__v0), "=r" (__a3) \
1003 : "r" (__a0), "r" (__a1), "i" (__NR_##name) \
1004 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1005 "memory"); \
1006 \
1007 if (__a3 == 0) \
1008 return (type) __v0; \
1009 errno = __v0; \
1010 return (type) -1; \
1011}
1012
1013#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
1014type name(atype a, btype b, ctype c) \
1015{ \
1016 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1017 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1018 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1019 register unsigned long __a3 asm("$7"); \
1020 unsigned long __v0; \
1021 \
1022 __asm__ volatile ( \
1023 ".set\tnoreorder\n\t" \
1024 "li\t$2, %5\t\t\t# " #name "\n\t" \
1025 "syscall\n\t" \
1026 "move\t%0, $2\n\t" \
1027 ".set\treorder" \
1028 : "=&r" (__v0), "=r" (__a3) \
1029 : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
1030 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1031 "memory"); \
1032 \
1033 if (__a3 == 0) \
1034 return (type) __v0; \
1035 errno = __v0; \
1036 return (type) -1; \
1037}
1038
1039#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
1040type name(atype a, btype b, ctype c, dtype d) \
1041{ \
1042 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1043 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1044 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1045 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1046 unsigned long __v0; \
1047 \
1048 __asm__ volatile ( \
1049 ".set\tnoreorder\n\t" \
1050 "li\t$2, %5\t\t\t# " #name "\n\t" \
1051 "syscall\n\t" \
1052 "move\t%0, $2\n\t" \
1053 ".set\treorder" \
1054 : "=&r" (__v0), "+r" (__a3) \
1055 : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
1056 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1057 "memory"); \
1058 \
1059 if (__a3 == 0) \
1060 return (type) __v0; \
1061 errno = __v0; \
1062 return (type) -1; \
1063}
1064
1065#if (_MIPS_SIM == _MIPS_SIM_ABI32)
1066
1067/*
1068 * Using those means your brain needs more than an oil change ;-)
1069 */
1070
1071#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
1072type name(atype a, btype b, ctype c, dtype d, etype e) \
1073{ \
1074 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1075 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1076 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1077 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1078 unsigned long __v0; \
1079 \
1080 __asm__ volatile ( \
1081 ".set\tnoreorder\n\t" \
1082 "lw\t$2, %6\n\t" \
1083 "subu\t$29, 32\n\t" \
1084 "sw\t$2, 16($29)\n\t" \
1085 "li\t$2, %5\t\t\t# " #name "\n\t" \
1086 "syscall\n\t" \
1087 "move\t%0, $2\n\t" \
1088 "addiu\t$29, 32\n\t" \
1089 ".set\treorder" \
1090 : "=&r" (__v0), "+r" (__a3) \
1091 : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
1092 "m" ((unsigned long)e) \
1093 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1094 "memory"); \
1095 \
1096 if (__a3 == 0) \
1097 return (type) __v0; \
1098 errno = __v0; \
1099 return (type) -1; \
1100}
1101
1102#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
1103type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
1104{ \
1105 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1106 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1107 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1108 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1109 unsigned long __v0; \
1110 \
1111 __asm__ volatile ( \
1112 ".set\tnoreorder\n\t" \
1113 "lw\t$2, %6\n\t" \
1114 "lw\t$8, %7\n\t" \
1115 "subu\t$29, 32\n\t" \
1116 "sw\t$2, 16($29)\n\t" \
1117 "sw\t$8, 20($29)\n\t" \
1118 "li\t$2, %5\t\t\t# " #name "\n\t" \
1119 "syscall\n\t" \
1120 "move\t%0, $2\n\t" \
1121 "addiu\t$29, 32\n\t" \
1122 ".set\treorder" \
1123 : "=&r" (__v0), "+r" (__a3) \
1124 : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
1125 "m" ((unsigned long)e), "m" ((unsigned long)f) \
1126 : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1127 "memory"); \
1128 \
1129 if (__a3 == 0) \
1130 return (type) __v0; \
1131 errno = __v0; \
1132 return (type) -1; \
1133}
1134
1135#endif /* (_MIPS_SIM == _MIPS_SIM_ABI32) */
1136
1137#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
1138
1139#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
1140type name (atype a,btype b,ctype c,dtype d,etype e) \
1141{ \
1142 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1143 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1144 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1145 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1146 register unsigned long __a4 asm("$8") = (unsigned long) e; \
1147 unsigned long __v0; \
1148 \
1149 __asm__ volatile ( \
1150 ".set\tnoreorder\n\t" \
1151 "li\t$2, %6\t\t\t# " #name "\n\t" \
1152 "syscall\n\t" \
1153 "move\t%0, $2\n\t" \
1154 ".set\treorder" \
1155 : "=&r" (__v0), "+r" (__a3) \
1156 : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "i" (__NR_##name) \
1157 : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1158 "memory"); \
1159 \
1160 if (__a3 == 0) \
1161 return (type) __v0; \
1162 errno = __v0; \
1163 return (type) -1; \
1164}
1165
1166#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
1167type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
1168{ \
1169 register unsigned long __a0 asm("$4") = (unsigned long) a; \
1170 register unsigned long __a1 asm("$5") = (unsigned long) b; \
1171 register unsigned long __a2 asm("$6") = (unsigned long) c; \
1172 register unsigned long __a3 asm("$7") = (unsigned long) d; \
1173 register unsigned long __a4 asm("$8") = (unsigned long) e; \
1174 register unsigned long __a5 asm("$9") = (unsigned long) f; \
1175 unsigned long __v0; \
1176 \
1177 __asm__ volatile ( \
1178 ".set\tnoreorder\n\t" \
1179 "li\t$2, %7\t\t\t# " #name "\n\t" \
1180 "syscall\n\t" \
1181 "move\t%0, $2\n\t" \
1182 ".set\treorder" \
1183 : "=&r" (__v0), "+r" (__a3) \
1184 : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "r" (__a5), \
1185 "i" (__NR_##name) \
1186 : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
1187 "memory"); \
1188 \
1189 if (__a3 == 0) \
1190 return (type) __v0; \
1191 errno = __v0; \
1192 return (type) -1; \
1193}
1194
1195#endif /* (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) */
1196
1197
1198#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64 936#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
1199#define __ARCH_WANT_IPC_PARSE_VERSION 937#define __ARCH_WANT_IPC_PARSE_VERSION
1200#define __ARCH_WANT_OLD_READDIR 938#define __ARCH_WANT_OLD_READDIR
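With the _syscallN() macros removed, userspace makes raw system calls through libc's syscall(2) wrapper instead; a sketch of the replacement for what _syscall0(pid_t, gettid) used to expand to:

#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t my_gettid(void)
{
	/* syscall(2) returns -1 and sets errno on failure, matching
	 * the convention the old macros open-coded. */
	return syscall(__NR_gettid);
}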
diff --git a/include/asm-mips/war.h b/include/asm-mips/war.h
index 3ac146c019c9..13a3502eef44 100644
--- a/include/asm-mips/war.h
+++ b/include/asm-mips/war.h
@@ -76,7 +76,7 @@
76/* 76/*
77 * But the RM200C seems to have been shipped only with V2.0 R4600s 77 * But the RM200C seems to have been shipped only with V2.0 R4600s
78 */ 78 */
79#ifdef CONFIG_SNI_RM200_PCI 79#ifdef CONFIG_SNI_RM
80 80
81#define R4600_V2_HIT_CACHEOP_WAR 1 81#define R4600_V2_HIT_CACHEOP_WAR 1
82 82