Diffstat (limited to 'include/asm-mips')
-rw-r--r--  include/asm-mips/abi.h                                | 4
-rw-r--r--  include/asm-mips/bitops.h                             | 58
-rw-r--r--  include/asm-mips/byteorder.h                          | 29
-rw-r--r--  include/asm-mips/cacheflush.h                         | 3
-rw-r--r--  include/asm-mips/hazards.h                            | 2
-rw-r--r--  include/asm-mips/interrupt.h                          | 27
-rw-r--r--  include/asm-mips/io.h                                 | 35
-rw-r--r--  include/asm-mips/mach-au1x00/au1000.h                 | 4
-rw-r--r--  include/asm-mips/mach-cobalt/cobalt.h (renamed from include/asm-mips/cobalt/cobalt.h) | 0
-rw-r--r--  include/asm-mips/mach-cobalt/cpu-feature-overrides.h  | 56
-rw-r--r--  include/asm-mips/mach-cobalt/mach-gt64120.h (renamed from include/asm-mips/cobalt/mach-gt64120.h) | 0
-rw-r--r--  include/asm-mips/mach-ip32/cpu-feature-overrides.h    | 2
-rw-r--r--  include/asm-mips/r4kcache.h                           | 400
-rw-r--r--  include/asm-mips/reboot.h                             | 3
-rw-r--r--  include/asm-mips/string.h                             | 22
-rw-r--r--  include/asm-mips/thread_info.h                        | 2
-rw-r--r--  include/asm-mips/tx4927/tx4927.h                      | 21
-rw-r--r--  include/asm-mips/tx4927/tx4927_pci.h                  | 10
-rw-r--r--  include/asm-mips/uaccess.h                            | 71
-rw-r--r--  include/asm-mips/unistd.h                             | 64
20 files changed, 328 insertions(+), 485 deletions(-)
diff --git a/include/asm-mips/abi.h b/include/asm-mips/abi.h
index 2e7e651c3e3f..1ce0518ace2e 100644
--- a/include/asm-mips/abi.h
+++ b/include/asm-mips/abi.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2005 by Ralf Baechle 6 * Copyright (C) 2005, 06 by Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2005 MIPS Technologies, Inc. 7 * Copyright (C) 2005 MIPS Technologies, Inc.
8 */ 8 */
9#ifndef _ASM_ABI_H 9#ifndef _ASM_ABI_H
@@ -13,7 +13,7 @@
13#include <asm/siginfo.h> 13#include <asm/siginfo.h>
14 14
15struct mips_abi { 15struct mips_abi {
16 int (* const do_signal)(sigset_t *oldset, struct pt_regs *regs); 16 void (* const do_signal)(struct pt_regs *regs);
17 int (* const setup_frame)(struct k_sigaction * ka, 17 int (* const setup_frame)(struct k_sigaction * ka,
18 struct pt_regs *regs, int signr, 18 struct pt_regs *regs, int signr,
19 sigset_t *set); 19 sigset_t *set);
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 3b0c8aaf6e8b..8e802059fe67 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -644,20 +644,26 @@ static inline unsigned long ffz(unsigned long word)
644} 644}
645 645
646/* 646/*
647 * flz - find last zero in word. 647 * fls - find last bit set.
648 * @word: The word to search 648 * @word: The word to search
649 * 649 *
650 * Returns 0..SZLONG-1 650 * Returns 1..SZLONG
651 * Undefined if no zero exists, so code should check against ~0UL first. 651 * Returns 0 if no bit exists
652 */ 652 */
653static inline unsigned long flz(unsigned long word) 653static inline unsigned long fls(unsigned long word)
654{ 654{
655#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
656 return __ilog2(~word);
657#else
658#ifdef CONFIG_32BIT 655#ifdef CONFIG_32BIT
659 int r = 31, s; 656#ifdef CONFIG_CPU_MIPS32
660 word = ~word; 657 __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
658
659 return 32 - word;
660#else
661 {
662 int r = 32, s;
663
664 if (word == 0)
665 return 0;
666
661 s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s; 667 s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
662 s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s; 668 s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
663 s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s; 669 s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
@@ -665,10 +671,23 @@ static inline unsigned long flz(unsigned long word)
665 s = 1; if ((word & 0x80000000)) s = 0; r -= s; 671 s = 1; if ((word & 0x80000000)) s = 0; r -= s;
666 672
667 return r; 673 return r;
674 }
668#endif 675#endif
676#endif /* CONFIG_32BIT */
677
669#ifdef CONFIG_64BIT 678#ifdef CONFIG_64BIT
670 int r = 63, s; 679#ifdef CONFIG_CPU_MIPS64
671 word = ~word; 680
681 __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
682
683 return 64 - word;
684#else
685 {
686 int r = 64, s;
687
688 if (word == 0)
689 return 0;
690
672 s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s; 691 s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
673 s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s; 692 s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
674 s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s; 693 s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
@@ -677,24 +696,11 @@ static inline unsigned long flz(unsigned long word)
677 s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s; 696 s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
678 697
679 return r; 698 return r;
699 }
680#endif 700#endif
681#endif 701#endif /* CONFIG_64BIT */
682} 702}
683 703
684/*
685 * fls - find last bit set.
686 * @word: The word to search
687 *
688 * Returns 1..SZLONG
689 * Returns 0 if no bit exists
690 */
691static inline unsigned long fls(unsigned long word)
692{
693 if (word == 0)
694 return 0;
695
696 return flz(~word) + 1;
697}
698#define fls64(x) generic_fls64(x) 704#define fls64(x) generic_fls64(x)
699 705
700/* 706/*
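
For reference, a minimal C sketch (not part of the patch; names are illustrative) of the semantics the new fls() implements on the MIPS32 path: the last set bit is the word size minus the leading-zero count. The hardware clz instruction returns 32 for a zero register, so the asm path above needs no zero check; __builtin_clz, used as a stand-in here, is undefined for 0 and is therefore guarded.

#include <assert.h>

static unsigned int fls32_sketch(unsigned int word)
{
        if (word == 0)
                return 0;
        return 32 - __builtin_clz(word);        /* clz counts leading zero bits */
}

int main(void)
{
        assert(fls32_sketch(0) == 0);
        assert(fls32_sketch(1) == 1);                   /* bit 0 set */
        assert(fls32_sketch(0x80000000u) == 32);        /* bit 31 set */
        return 0;
}
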
diff --git a/include/asm-mips/byteorder.h b/include/asm-mips/byteorder.h
index d1fe9e5c62e4..584f8128fffd 100644
--- a/include/asm-mips/byteorder.h
+++ b/include/asm-mips/byteorder.h
@@ -8,10 +8,39 @@
8#ifndef _ASM_BYTEORDER_H 8#ifndef _ASM_BYTEORDER_H
9#define _ASM_BYTEORDER_H 9#define _ASM_BYTEORDER_H
10 10
11#include <linux/config.h>
12#include <linux/compiler.h>
11#include <asm/types.h> 13#include <asm/types.h>
12 14
13#ifdef __GNUC__ 15#ifdef __GNUC__
14 16
17#ifdef CONFIG_CPU_MIPSR2
18
19static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
20{
21 __asm__(
22 " wsbh %0, %1 \n"
23 : "=r" (x)
24 : "r" (x));
25
26 return x;
27}
28#define __arch__swab16(x) ___arch__swab16(x)
29
30static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
31{
32 __asm__(
33 " wsbh %0, %1 \n"
34 " rotr %0, %0, 16 \n"
35 : "=r" (x)
36 : "r" (x));
37
38 return x;
39}
40#define __arch__swab32(x) ___arch__swab32(x)
41
42#endif /* CONFIG_CPU_MIPSR2 */
43
15#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) 44#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
16# define __BYTEORDER_HAS_U64__ 45# define __BYTEORDER_HAS_U64__
17# define __SWAB_64_THRU_32__ 46# define __SWAB_64_THRU_32__
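
The R2 swab32 added above works in two steps: wsbh swaps the bytes inside each 16-bit half, and rotr by 16 then exchanges the two halves. A self-contained C sketch of those two steps (assumed equivalent; helper names are illustrative, not from the patch):

#include <assert.h>
#include <stdint.h>

static uint32_t wsbh_sketch(uint32_t x)         /* swap bytes within each halfword */
{
        return ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
}

static uint32_t swab32_sketch(uint32_t x)       /* wsbh + rotate by 16 = full byte swap */
{
        x = wsbh_sketch(x);
        return (x >> 16) | (x << 16);
}

int main(void)
{
        assert(wsbh_sketch(0x12345678u) == 0x34127856u);
        assert(swab32_sketch(0x12345678u) == 0x78563412u);
        return 0;
}
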
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index a18ba2edc0b6..aeae9fabf4a9 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -49,8 +49,7 @@ static inline void flush_dcache_page(struct page *page)
49 49
50extern void (*flush_icache_page)(struct vm_area_struct *vma, 50extern void (*flush_icache_page)(struct vm_area_struct *vma,
51 struct page *page); 51 struct page *page);
52extern void (*flush_icache_range)(unsigned long __user start, 52extern void (*flush_icache_range)(unsigned long start, unsigned long end);
53 unsigned long __user end);
54#define flush_cache_vmap(start, end) flush_cache_all() 53#define flush_cache_vmap(start, end) flush_cache_all()
55#define flush_cache_vunmap(start, end) flush_cache_all() 54#define flush_cache_vunmap(start, end) flush_cache_all()
56 55
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index 2fc90632f88c..6111a0ce58c4 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -100,7 +100,7 @@
100 100
101__asm__( 101__asm__(
102 " .macro _ssnop \n\t" 102 " .macro _ssnop \n\t"
103 " sll $0, $2, 1 \n\t" 103 " sll $0, $0, 1 \n\t"
104 " .endm \n\t" 104 " .endm \n\t"
105 " \n\t" 105 " \n\t"
106 " .macro _ehb \n\t" 106 " .macro _ehb \n\t"
diff --git a/include/asm-mips/interrupt.h b/include/asm-mips/interrupt.h
index abdf54ee64cf..774348734fa0 100644
--- a/include/asm-mips/interrupt.h
+++ b/include/asm-mips/interrupt.h
@@ -47,6 +47,17 @@ static inline void local_irq_enable(void)
47 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs 47 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
48 * no nops at all. 48 * no nops at all.
49 */ 49 */
50/*
51 * For TX49, operating only IE bit is not enough.
52 *
53 * If mfc0 $12 follows store and the mfc0 is last instruction of a
54 * page and fetching the next instruction causes TLB miss, the result
55 * of the mfc0 might wrongly contain EXL bit.
56 *
57 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
58 *
59 * Workaround: mask EXL bit of the result or place a nop before mfc0.
60 */
50__asm__ ( 61__asm__ (
51 " .macro local_irq_disable\n" 62 " .macro local_irq_disable\n"
52 " .set push \n" 63 " .set push \n"
@@ -55,8 +66,8 @@ __asm__ (
55 " di \n" 66 " di \n"
56#else 67#else
57 " mfc0 $1,$12 \n" 68 " mfc0 $1,$12 \n"
58 " ori $1,1 \n" 69 " ori $1,0x1f \n"
59 " xori $1,1 \n" 70 " xori $1,0x1f \n"
60 " .set noreorder \n" 71 " .set noreorder \n"
61 " mtc0 $1,$12 \n" 72 " mtc0 $1,$12 \n"
62#endif 73#endif
@@ -96,8 +107,8 @@ __asm__ (
96 " andi \\result, 1 \n" 107 " andi \\result, 1 \n"
97#else 108#else
98 " mfc0 \\result, $12 \n" 109 " mfc0 \\result, $12 \n"
99 " ori $1, \\result, 1 \n" 110 " ori $1, \\result, 0x1f \n"
100 " xori $1, 1 \n" 111 " xori $1, 0x1f \n"
101 " .set noreorder \n" 112 " .set noreorder \n"
102 " mtc0 $1, $12 \n" 113 " mtc0 $1, $12 \n"
103#endif 114#endif
@@ -114,6 +125,7 @@ __asm__ __volatile__( \
114 125
115__asm__ ( 126__asm__ (
116 " .macro local_irq_restore flags \n" 127 " .macro local_irq_restore flags \n"
128 " .set push \n"
117 " .set noreorder \n" 129 " .set noreorder \n"
118 " .set noat \n" 130 " .set noat \n"
119#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 131#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
@@ -135,14 +147,13 @@ __asm__ (
135#else 147#else
136 " mfc0 $1, $12 \n" 148 " mfc0 $1, $12 \n"
137 " andi \\flags, 1 \n" 149 " andi \\flags, 1 \n"
138 " ori $1, 1 \n" 150 " ori $1, 0x1f \n"
139 " xori $1, 1 \n" 151 " xori $1, 0x1f \n"
140 " or \\flags, $1 \n" 152 " or \\flags, $1 \n"
141 " mtc0 \\flags, $12 \n" 153 " mtc0 \\flags, $12 \n"
142#endif 154#endif
143 " irq_disable_hazard \n" 155 " irq_disable_hazard \n"
144 " .set at \n" 156 " .set pop \n"
145 " .set reorder \n"
146 " .endm \n"); 157 " .endm \n");
147 158
148#define local_irq_restore(flags) \ 159#define local_irq_restore(flags) \
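
On the non-R2 paths above, ori followed by xori with the same constant is a mask-clear: (status | m) ^ m equals status & ~m. Widening the constant from 1 to 0x1f clears Status bits 4..0 (KSU, ERL, EXL, IE) instead of IE alone, so an EXL bit spuriously returned by mfc0 under the TX49 errata cannot be written back. A small C sketch of that identity (function and macro names are illustrative):

#include <assert.h>
#include <stdint.h>

#define ST0_LOW_MASK 0x1fu      /* Status bits 4..0: KSU, ERL, EXL, IE */

/* "ori $1, status, 0x1f; xori $1, 0x1f" computes status & ~0x1f. */
static uint32_t irq_disable_value(uint32_t status)
{
        return (status | ST0_LOW_MASK) ^ ST0_LOW_MASK;
}

int main(void)
{
        assert(irq_disable_value(0x0000ff01u) == 0x0000ff00u);  /* IE cleared */
        assert(irq_disable_value(0x0000ff03u) == 0x0000ff00u);  /* stale EXL cleared too */
        return 0;
}
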
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index d42685747e7d..5a4c8a54b8f4 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -18,7 +18,6 @@
18#include <linux/types.h> 18#include <linux/types.h>
19 19
20#include <asm/addrspace.h> 20#include <asm/addrspace.h>
21#include <asm/bug.h>
22#include <asm/byteorder.h> 21#include <asm/byteorder.h>
23#include <asm/cpu.h> 22#include <asm/cpu.h>
24#include <asm/cpu-features.h> 23#include <asm/cpu-features.h>
@@ -57,38 +56,38 @@
57 * variations of functions: non-prefixed ones that preserve the value 56 * variations of functions: non-prefixed ones that preserve the value
58 * and prefixed ones that preserve byte addresses. The latters are 57 * and prefixed ones that preserve byte addresses. The latters are
59 * typically used for moving raw data between a peripheral and memory (cf. 58 * typically used for moving raw data between a peripheral and memory (cf.
60 * string I/O functions), hence the "mem_" prefix. 59 * string I/O functions), hence the "__mem_" prefix.
61 */ 60 */
62#if defined(CONFIG_SWAP_IO_SPACE) 61#if defined(CONFIG_SWAP_IO_SPACE)
63 62
64# define ioswabb(x) (x) 63# define ioswabb(x) (x)
65# define mem_ioswabb(x) (x) 64# define __mem_ioswabb(x) (x)
66# ifdef CONFIG_SGI_IP22 65# ifdef CONFIG_SGI_IP22
67/* 66/*
68 * IP22 seems braindead enough to swap 16bits values in hardware, but 67 * IP22 seems braindead enough to swap 16bits values in hardware, but
69 * not 32bits. Go figure... Can't tell without documentation. 68 * not 32bits. Go figure... Can't tell without documentation.
70 */ 69 */
71# define ioswabw(x) (x) 70# define ioswabw(x) (x)
72# define mem_ioswabw(x) le16_to_cpu(x) 71# define __mem_ioswabw(x) le16_to_cpu(x)
73# else 72# else
74# define ioswabw(x) le16_to_cpu(x) 73# define ioswabw(x) le16_to_cpu(x)
75# define mem_ioswabw(x) (x) 74# define __mem_ioswabw(x) (x)
76# endif 75# endif
77# define ioswabl(x) le32_to_cpu(x) 76# define ioswabl(x) le32_to_cpu(x)
78# define mem_ioswabl(x) (x) 77# define __mem_ioswabl(x) (x)
79# define ioswabq(x) le64_to_cpu(x) 78# define ioswabq(x) le64_to_cpu(x)
80# define mem_ioswabq(x) (x) 79# define __mem_ioswabq(x) (x)
81 80
82#else 81#else
83 82
84# define ioswabb(x) (x) 83# define ioswabb(x) (x)
85# define mem_ioswabb(x) (x) 84# define __mem_ioswabb(x) (x)
86# define ioswabw(x) (x) 85# define ioswabw(x) (x)
87# define mem_ioswabw(x) cpu_to_le16(x) 86# define __mem_ioswabw(x) cpu_to_le16(x)
88# define ioswabl(x) (x) 87# define ioswabl(x) (x)
89# define mem_ioswabl(x) cpu_to_le32(x) 88# define __mem_ioswabl(x) cpu_to_le32(x)
90# define ioswabq(x) (x) 89# define ioswabq(x) (x)
91# define mem_ioswabq(x) cpu_to_le32(x) 90# define __mem_ioswabq(x) cpu_to_le32(x)
92 91
93#endif 92#endif
94 93
@@ -343,7 +342,7 @@ static inline void pfx##write##bwlq(type val, \
343 BUG(); \ 342 BUG(); \
344} \ 343} \
345 \ 344 \
346static inline type pfx##read##bwlq(volatile void __iomem *mem) \ 345static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
347{ \ 346{ \
348 volatile type *__mem; \ 347 volatile type *__mem; \
349 type __val; \ 348 type __val; \
@@ -418,7 +417,7 @@ __BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)
418 \ 417 \
419__BUILD_MEMORY_PFX(__raw_, bwlq, type) \ 418__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
420__BUILD_MEMORY_PFX(, bwlq, type) \ 419__BUILD_MEMORY_PFX(, bwlq, type) \
421__BUILD_MEMORY_PFX(mem_, bwlq, type) \ 420__BUILD_MEMORY_PFX(__mem_, bwlq, type) \
422 421
423BUILDIO_MEM(b, u8) 422BUILDIO_MEM(b, u8)
424BUILDIO_MEM(w, u16) 423BUILDIO_MEM(w, u16)
@@ -431,7 +430,7 @@ BUILDIO_MEM(q, u64)
431 430
432#define BUILDIO_IOPORT(bwlq, type) \ 431#define BUILDIO_IOPORT(bwlq, type) \
433 __BUILD_IOPORT_PFX(, bwlq, type) \ 432 __BUILD_IOPORT_PFX(, bwlq, type) \
434 __BUILD_IOPORT_PFX(mem_, bwlq, type) 433 __BUILD_IOPORT_PFX(__mem_, bwlq, type)
435 434
436BUILDIO_IOPORT(b, u8) 435BUILDIO_IOPORT(b, u8)
437BUILDIO_IOPORT(w, u16) 436BUILDIO_IOPORT(w, u16)
@@ -465,7 +464,7 @@ static inline void writes##bwlq(volatile void __iomem *mem, \
465 const volatile type *__addr = addr; \ 464 const volatile type *__addr = addr; \
466 \ 465 \
467 while (count--) { \ 466 while (count--) { \
468 mem_write##bwlq(*__addr, mem); \ 467 __mem_write##bwlq(*__addr, mem); \
469 __addr++; \ 468 __addr++; \
470 } \ 469 } \
471} \ 470} \
@@ -476,7 +475,7 @@ static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
476 volatile type *__addr = addr; \ 475 volatile type *__addr = addr; \
477 \ 476 \
478 while (count--) { \ 477 while (count--) { \
479 *__addr = mem_read##bwlq(mem); \ 478 *__addr = __mem_read##bwlq(mem); \
480 __addr++; \ 479 __addr++; \
481 } \ 480 } \
482} 481}
@@ -489,7 +488,7 @@ static inline void outs##bwlq(unsigned long port, const void *addr, \
489 const volatile type *__addr = addr; \ 488 const volatile type *__addr = addr; \
490 \ 489 \
491 while (count--) { \ 490 while (count--) { \
492 mem_out##bwlq(*__addr, port); \ 491 __mem_out##bwlq(*__addr, port); \
493 __addr++; \ 492 __addr++; \
494 } \ 493 } \
495} \ 494} \
@@ -500,7 +499,7 @@ static inline void ins##bwlq(unsigned long port, void *addr, \
500 volatile type *__addr = addr; \ 499 volatile type *__addr = addr; \
501 \ 500 \
502 while (count--) { \ 501 while (count--) { \
503 *__addr = mem_in##bwlq(port); \ 502 *__addr = __mem_in##bwlq(port); \
504 __addr++; \ 503 __addr++; \
505 } \ 504 } \
506} 505}
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 8e1d7ed7d8e3..4686e17c206c 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -1198,7 +1198,11 @@ extern au1xxx_irq_map_t au1xxx_irq_map[];
1198 1198
1199/* UARTS 0-3 */ 1199/* UARTS 0-3 */
1200#define UART_BASE UART0_ADDR 1200#define UART_BASE UART0_ADDR
1201#ifdef CONFIG_SOC_AU1200
1202#define UART_DEBUG_BASE UART1_ADDR
1203#else
1201#define UART_DEBUG_BASE UART3_ADDR 1204#define UART_DEBUG_BASE UART3_ADDR
1205#endif
1202 1206
1203#define UART_RX 0 /* Receive buffer */ 1207#define UART_RX 0 /* Receive buffer */
1204#define UART_TX 4 /* Transmit buffer */ 1208#define UART_TX 4 /* Transmit buffer */
diff --git a/include/asm-mips/cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index 78e1df2095fb..78e1df2095fb 100644
--- a/include/asm-mips/cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
diff --git a/include/asm-mips/mach-cobalt/cpu-feature-overrides.h b/include/asm-mips/mach-cobalt/cpu-feature-overrides.h
new file mode 100644
index 000000000000..ace8c5ef9701
--- /dev/null
+++ b/include/asm-mips/mach-cobalt/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
7 */
8#ifndef __ASM_COBALT_CPU_FEATURE_OVERRIDES_H
9#define __ASM_COBALT_CPU_FEATURE_OVERRIDES_H
10
11#include <linux/config.h>
12
13#define cpu_has_tlb 1
14#define cpu_has_4kex 1
15#define cpu_has_3k_cache 0
16#define cpu_has_4k_cache 1
17#define cpu_has_tx39_cache 0
18#define cpu_has_sb1_cache 0
19#define cpu_has_fpu 1
20#define cpu_has_32fpr 1
21#define cpu_has_counter 1
22#define cpu_has_watch 0
23#define cpu_has_divec 1
24#define cpu_has_vce 0
25#define cpu_has_cache_cdex_p 0
26#define cpu_has_cache_cdex_s 0
27#define cpu_has_prefetch 0
28#define cpu_has_mcheck 0
29#define cpu_has_ejtag 0
30
31#define cpu_has_subset_pcaches 0
32#define cpu_dcache_line_size() 32
33#define cpu_icache_line_size() 32
34#define cpu_scache_line_size() 0
35
36#ifdef CONFIG_64BIT
37#define cpu_has_llsc 0
38#else
39#define cpu_has_llsc 1
40#endif
41
42#define cpu_has_mips16 0
43#define cpu_has_mdmx 0
44#define cpu_has_mips3d 0
45#define cpu_has_smartmips 0
46#define cpu_has_vtag_icache 0
47#define cpu_has_ic_fills_f_dc 0
48#define cpu_icache_snoops_remote_store 0
49#define cpu_has_dsp 0
50
51#define cpu_has_mips32r1 0
52#define cpu_has_mips32r2 0
53#define cpu_has_mips64r1 0
54#define cpu_has_mips64r2 0
55
56#endif /* __ASM_COBALT_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/cobalt/mach-gt64120.h b/include/asm-mips/mach-cobalt/mach-gt64120.h
index 587fc4378f44..587fc4378f44 100644
--- a/include/asm-mips/cobalt/mach-gt64120.h
+++ b/include/asm-mips/mach-cobalt/mach-gt64120.h
diff --git a/include/asm-mips/mach-ip32/cpu-feature-overrides.h b/include/asm-mips/mach-ip32/cpu-feature-overrides.h
index b80c30725cf6..36070b5654ab 100644
--- a/include/asm-mips/mach-ip32/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-ip32/cpu-feature-overrides.h
@@ -18,7 +18,7 @@
18 * so, for 64bit IP32 kernel we just don't use ll/sc. 18 * so, for 64bit IP32 kernel we just don't use ll/sc.
19 * This does not affect luserland. 19 * This does not affect luserland.
20 */ 20 */
21#if defined(CONFIG_CPU_R5000) && defined(CONFIG_64BIT) 21#if (defined(CONFIG_CPU_R5000) || defined(CONFIG_CPU_NEVADA)) && defined(CONFIG_64BIT)
22#define cpu_has_llsc 0 22#define cpu_has_llsc 0
23#else 23#else
24#define cpu_has_llsc 1 24#define cpu_has_llsc 1
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index a5ea9d828aee..cc53196efa40 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -166,123 +166,6 @@ static inline void invalidate_tcache_page(unsigned long addr)
166 : "r" (base), \ 166 : "r" (base), \
167 "i" (op)); 167 "i" (op));
168 168
169static inline void blast_dcache16(void)
170{
171 unsigned long start = INDEX_BASE;
172 unsigned long end = start + current_cpu_data.dcache.waysize;
173 unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
174 unsigned long ws_end = current_cpu_data.dcache.ways <<
175 current_cpu_data.dcache.waybit;
176 unsigned long ws, addr;
177
178 for (ws = 0; ws < ws_end; ws += ws_inc)
179 for (addr = start; addr < end; addr += 0x200)
180 cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
181}
182
183static inline void blast_dcache16_page(unsigned long page)
184{
185 unsigned long start = page;
186 unsigned long end = start + PAGE_SIZE;
187
188 do {
189 cache16_unroll32(start,Hit_Writeback_Inv_D);
190 start += 0x200;
191 } while (start < end);
192}
193
194static inline void blast_dcache16_page_indexed(unsigned long page)
195{
196 unsigned long start = page;
197 unsigned long end = start + PAGE_SIZE;
198 unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
199 unsigned long ws_end = current_cpu_data.dcache.ways <<
200 current_cpu_data.dcache.waybit;
201 unsigned long ws, addr;
202
203 for (ws = 0; ws < ws_end; ws += ws_inc)
204 for (addr = start; addr < end; addr += 0x200)
205 cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
206}
207
208static inline void blast_icache16(void)
209{
210 unsigned long start = INDEX_BASE;
211 unsigned long end = start + current_cpu_data.icache.waysize;
212 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
213 unsigned long ws_end = current_cpu_data.icache.ways <<
214 current_cpu_data.icache.waybit;
215 unsigned long ws, addr;
216
217 for (ws = 0; ws < ws_end; ws += ws_inc)
218 for (addr = start; addr < end; addr += 0x200)
219 cache16_unroll32(addr|ws,Index_Invalidate_I);
220}
221
222static inline void blast_icache16_page(unsigned long page)
223{
224 unsigned long start = page;
225 unsigned long end = start + PAGE_SIZE;
226
227 do {
228 cache16_unroll32(start,Hit_Invalidate_I);
229 start += 0x200;
230 } while (start < end);
231}
232
233static inline void blast_icache16_page_indexed(unsigned long page)
234{
235 unsigned long start = page;
236 unsigned long end = start + PAGE_SIZE;
237 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
238 unsigned long ws_end = current_cpu_data.icache.ways <<
239 current_cpu_data.icache.waybit;
240 unsigned long ws, addr;
241
242 for (ws = 0; ws < ws_end; ws += ws_inc)
243 for (addr = start; addr < end; addr += 0x200)
244 cache16_unroll32(addr|ws,Index_Invalidate_I);
245}
246
247static inline void blast_scache16(void)
248{
249 unsigned long start = INDEX_BASE;
250 unsigned long end = start + current_cpu_data.scache.waysize;
251 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
252 unsigned long ws_end = current_cpu_data.scache.ways <<
253 current_cpu_data.scache.waybit;
254 unsigned long ws, addr;
255
256 for (ws = 0; ws < ws_end; ws += ws_inc)
257 for (addr = start; addr < end; addr += 0x200)
258 cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
259}
260
261static inline void blast_scache16_page(unsigned long page)
262{
263 unsigned long start = page;
264 unsigned long end = page + PAGE_SIZE;
265
266 do {
267 cache16_unroll32(start,Hit_Writeback_Inv_SD);
268 start += 0x200;
269 } while (start < end);
270}
271
272static inline void blast_scache16_page_indexed(unsigned long page)
273{
274 unsigned long start = page;
275 unsigned long end = start + PAGE_SIZE;
276 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
277 unsigned long ws_end = current_cpu_data.scache.ways <<
278 current_cpu_data.scache.waybit;
279 unsigned long ws, addr;
280
281 for (ws = 0; ws < ws_end; ws += ws_inc)
282 for (addr = start; addr < end; addr += 0x200)
283 cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
284}
285
286#define cache32_unroll32(base,op) \ 169#define cache32_unroll32(base,op) \
287 __asm__ __volatile__( \ 170 __asm__ __volatile__( \
288 " .set push \n" \ 171 " .set push \n" \
@@ -309,123 +192,6 @@ static inline void blast_scache16_page_indexed(unsigned long page)
309 : "r" (base), \ 192 : "r" (base), \
310 "i" (op)); 193 "i" (op));
311 194
312static inline void blast_dcache32(void)
313{
314 unsigned long start = INDEX_BASE;
315 unsigned long end = start + current_cpu_data.dcache.waysize;
316 unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
317 unsigned long ws_end = current_cpu_data.dcache.ways <<
318 current_cpu_data.dcache.waybit;
319 unsigned long ws, addr;
320
321 for (ws = 0; ws < ws_end; ws += ws_inc)
322 for (addr = start; addr < end; addr += 0x400)
323 cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
324}
325
326static inline void blast_dcache32_page(unsigned long page)
327{
328 unsigned long start = page;
329 unsigned long end = start + PAGE_SIZE;
330
331 do {
332 cache32_unroll32(start,Hit_Writeback_Inv_D);
333 start += 0x400;
334 } while (start < end);
335}
336
337static inline void blast_dcache32_page_indexed(unsigned long page)
338{
339 unsigned long start = page;
340 unsigned long end = start + PAGE_SIZE;
341 unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
342 unsigned long ws_end = current_cpu_data.dcache.ways <<
343 current_cpu_data.dcache.waybit;
344 unsigned long ws, addr;
345
346 for (ws = 0; ws < ws_end; ws += ws_inc)
347 for (addr = start; addr < end; addr += 0x400)
348 cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
349}
350
351static inline void blast_icache32(void)
352{
353 unsigned long start = INDEX_BASE;
354 unsigned long end = start + current_cpu_data.icache.waysize;
355 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
356 unsigned long ws_end = current_cpu_data.icache.ways <<
357 current_cpu_data.icache.waybit;
358 unsigned long ws, addr;
359
360 for (ws = 0; ws < ws_end; ws += ws_inc)
361 for (addr = start; addr < end; addr += 0x400)
362 cache32_unroll32(addr|ws,Index_Invalidate_I);
363}
364
365static inline void blast_icache32_page(unsigned long page)
366{
367 unsigned long start = page;
368 unsigned long end = start + PAGE_SIZE;
369
370 do {
371 cache32_unroll32(start,Hit_Invalidate_I);
372 start += 0x400;
373 } while (start < end);
374}
375
376static inline void blast_icache32_page_indexed(unsigned long page)
377{
378 unsigned long start = page;
379 unsigned long end = start + PAGE_SIZE;
380 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
381 unsigned long ws_end = current_cpu_data.icache.ways <<
382 current_cpu_data.icache.waybit;
383 unsigned long ws, addr;
384
385 for (ws = 0; ws < ws_end; ws += ws_inc)
386 for (addr = start; addr < end; addr += 0x400)
387 cache32_unroll32(addr|ws,Index_Invalidate_I);
388}
389
390static inline void blast_scache32(void)
391{
392 unsigned long start = INDEX_BASE;
393 unsigned long end = start + current_cpu_data.scache.waysize;
394 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
395 unsigned long ws_end = current_cpu_data.scache.ways <<
396 current_cpu_data.scache.waybit;
397 unsigned long ws, addr;
398
399 for (ws = 0; ws < ws_end; ws += ws_inc)
400 for (addr = start; addr < end; addr += 0x400)
401 cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
402}
403
404static inline void blast_scache32_page(unsigned long page)
405{
406 unsigned long start = page;
407 unsigned long end = page + PAGE_SIZE;
408
409 do {
410 cache32_unroll32(start,Hit_Writeback_Inv_SD);
411 start += 0x400;
412 } while (start < end);
413}
414
415static inline void blast_scache32_page_indexed(unsigned long page)
416{
417 unsigned long start = page;
418 unsigned long end = start + PAGE_SIZE;
419 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
420 unsigned long ws_end = current_cpu_data.scache.ways <<
421 current_cpu_data.scache.waybit;
422 unsigned long ws, addr;
423
424 for (ws = 0; ws < ws_end; ws += ws_inc)
425 for (addr = start; addr < end; addr += 0x400)
426 cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
427}
428
429#define cache64_unroll32(base,op) \ 195#define cache64_unroll32(base,op) \
430 __asm__ __volatile__( \ 196 __asm__ __volatile__( \
431 " .set push \n" \ 197 " .set push \n" \
@@ -452,84 +218,6 @@ static inline void blast_scache32_page_indexed(unsigned long page)
452 : "r" (base), \ 218 : "r" (base), \
453 "i" (op)); 219 "i" (op));
454 220
455static inline void blast_icache64(void)
456{
457 unsigned long start = INDEX_BASE;
458 unsigned long end = start + current_cpu_data.icache.waysize;
459 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
460 unsigned long ws_end = current_cpu_data.icache.ways <<
461 current_cpu_data.icache.waybit;
462 unsigned long ws, addr;
463
464 for (ws = 0; ws < ws_end; ws += ws_inc)
465 for (addr = start; addr < end; addr += 0x800)
466 cache64_unroll32(addr|ws,Index_Invalidate_I);
467}
468
469static inline void blast_icache64_page(unsigned long page)
470{
471 unsigned long start = page;
472 unsigned long end = start + PAGE_SIZE;
473
474 do {
475 cache64_unroll32(start,Hit_Invalidate_I);
476 start += 0x800;
477 } while (start < end);
478}
479
480static inline void blast_icache64_page_indexed(unsigned long page)
481{
482 unsigned long start = page;
483 unsigned long end = start + PAGE_SIZE;
484 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
485 unsigned long ws_end = current_cpu_data.icache.ways <<
486 current_cpu_data.icache.waybit;
487 unsigned long ws, addr;
488
489 for (ws = 0; ws < ws_end; ws += ws_inc)
490 for (addr = start; addr < end; addr += 0x800)
491 cache64_unroll32(addr|ws,Index_Invalidate_I);
492}
493
494static inline void blast_scache64(void)
495{
496 unsigned long start = INDEX_BASE;
497 unsigned long end = start + current_cpu_data.scache.waysize;
498 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
499 unsigned long ws_end = current_cpu_data.scache.ways <<
500 current_cpu_data.scache.waybit;
501 unsigned long ws, addr;
502
503 for (ws = 0; ws < ws_end; ws += ws_inc)
504 for (addr = start; addr < end; addr += 0x800)
505 cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
506}
507
508static inline void blast_scache64_page(unsigned long page)
509{
510 unsigned long start = page;
511 unsigned long end = page + PAGE_SIZE;
512
513 do {
514 cache64_unroll32(start,Hit_Writeback_Inv_SD);
515 start += 0x800;
516 } while (start < end);
517}
518
519static inline void blast_scache64_page_indexed(unsigned long page)
520{
521 unsigned long start = page;
522 unsigned long end = start + PAGE_SIZE;
523 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
524 unsigned long ws_end = current_cpu_data.scache.ways <<
525 current_cpu_data.scache.waybit;
526 unsigned long ws, addr;
527
528 for (ws = 0; ws < ws_end; ws += ws_inc)
529 for (addr = start; addr < end; addr += 0x800)
530 cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
531}
532
533#define cache128_unroll32(base,op) \ 221#define cache128_unroll32(base,op) \
534 __asm__ __volatile__( \ 222 __asm__ __volatile__( \
535 " .set push \n" \ 223 " .set push \n" \
@@ -556,43 +244,55 @@ static inline void blast_scache64_page_indexed(unsigned long page)
556 : "r" (base), \ 244 : "r" (base), \
557 "i" (op)); 245 "i" (op));
558 246
559static inline void blast_scache128(void) 247/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
560{ 248#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
561 unsigned long start = INDEX_BASE; 249static inline void blast_##pfx##cache##lsize(void) \
562 unsigned long end = start + current_cpu_data.scache.waysize; 250{ \
563 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit; 251 unsigned long start = INDEX_BASE; \
564 unsigned long ws_end = current_cpu_data.scache.ways << 252 unsigned long end = start + current_cpu_data.desc.waysize; \
565 current_cpu_data.scache.waybit; 253 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
566 unsigned long ws, addr; 254 unsigned long ws_end = current_cpu_data.desc.ways << \
567 255 current_cpu_data.desc.waybit; \
568 for (ws = 0; ws < ws_end; ws += ws_inc) 256 unsigned long ws, addr; \
569 for (addr = start; addr < end; addr += 0x1000) 257 \
570 cache128_unroll32(addr|ws,Index_Writeback_Inv_SD); 258 for (ws = 0; ws < ws_end; ws += ws_inc) \
571} 259 for (addr = start; addr < end; addr += lsize * 32) \
572 260 cache##lsize##_unroll32(addr|ws,indexop); \
573static inline void blast_scache128_page(unsigned long page) 261} \
574{ 262 \
575 unsigned long start = page; 263static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
576 unsigned long end = page + PAGE_SIZE; 264{ \
577 265 unsigned long start = page; \
578 do { 266 unsigned long end = page + PAGE_SIZE; \
579 cache128_unroll32(start,Hit_Writeback_Inv_SD); 267 \
580 start += 0x1000; 268 do { \
581 } while (start < end); 269 cache##lsize##_unroll32(start,hitop); \
582} 270 start += lsize * 32; \
583 271 } while (start < end); \
584static inline void blast_scache128_page_indexed(unsigned long page) 272} \
585{ 273 \
586 unsigned long start = page; 274static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
587 unsigned long end = start + PAGE_SIZE; 275{ \
588 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit; 276 unsigned long start = page; \
589 unsigned long ws_end = current_cpu_data.scache.ways << 277 unsigned long end = start + PAGE_SIZE; \
590 current_cpu_data.scache.waybit; 278 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
591 unsigned long ws, addr; 279 unsigned long ws_end = current_cpu_data.desc.ways << \
592 280 current_cpu_data.desc.waybit; \
593 for (ws = 0; ws < ws_end; ws += ws_inc) 281 unsigned long ws, addr; \
594 for (addr = start; addr < end; addr += 0x1000) 282 \
595 cache128_unroll32(addr|ws,Index_Writeback_Inv_SD); 283 for (ws = 0; ws < ws_end; ws += ws_inc) \
596} 284 for (addr = start; addr < end; addr += lsize * 32) \
285 cache##lsize##_unroll32(addr|ws,indexop); \
286}
287
288__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
289__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
290__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
291__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
292__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
293__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
294__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
295__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
296__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
597 297
598#endif /* _ASM_R4KCACHE_H */ 298#endif /* _ASM_R4KCACHE_H */
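
The __BUILD_BLAST_CACHE macro introduced above stamps out the blast_{d,i,s}cacheN, _page and _page_indexed families that were previously written out by hand, deriving the function names and the stride (line size times the 32-fold unroll) from its arguments. A toy, self-contained sketch of the same token-pasting pattern; PAGE_SIZE is hard-coded and printf stands in for the cache##lsize##_unroll32() asm, so this only models the structure, not the real cache ops:

#include <stdio.h>

#define BUILD_BLAST_PAGE(pfx, lsize)                                    \
static void blast_##pfx##cache##lsize##_page(unsigned long page)       \
{                                                                       \
        unsigned long addr = page;                                      \
        unsigned long end = page + 4096;   /* PAGE_SIZE stand-in */     \
                                                                        \
        do {                                                            \
                printf("cache%d op at %#lx\n", lsize, addr);            \
                addr += lsize * 32;        /* 32 cache ops per step */  \
        } while (addr < end);                                           \
}

BUILD_BLAST_PAGE(d, 32)         /* defines blast_dcache32_page() */

int main(void)
{
        blast_dcache32_page(0x80000000UL);
        return 0;
}
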
diff --git a/include/asm-mips/reboot.h b/include/asm-mips/reboot.h
index 2f10ebcbe141..e48c0bfab257 100644
--- a/include/asm-mips/reboot.h
+++ b/include/asm-mips/reboot.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1997, 1999, 2001 by Ralf Baechle 6 * Copyright (C) 1997, 1999, 2001, 06 by Ralf Baechle
7 * Copyright (C) 2001 MIPS Technologies, Inc. 7 * Copyright (C) 2001 MIPS Technologies, Inc.
8 */ 8 */
9#ifndef _ASM_REBOOT_H 9#ifndef _ASM_REBOOT_H
@@ -11,6 +11,5 @@
11 11
12extern void (*_machine_restart)(char *command); 12extern void (*_machine_restart)(char *command);
13extern void (*_machine_halt)(void); 13extern void (*_machine_halt)(void);
14extern void (*_machine_power_off)(void);
15 14
16#endif /* _ASM_REBOOT_H */ 15#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-mips/string.h b/include/asm-mips/string.h
index 5a06f6d13899..907da600fddd 100644
--- a/include/asm-mips/string.h
+++ b/include/asm-mips/string.h
@@ -141,26 +141,4 @@ extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
141#define __HAVE_ARCH_MEMMOVE 141#define __HAVE_ARCH_MEMMOVE
142extern void *memmove(void *__dest, __const__ void *__src, size_t __n); 142extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
143 143
144#ifdef CONFIG_32BIT
145#define __HAVE_ARCH_MEMSCAN
146static __inline__ void *memscan(void *__addr, int __c, size_t __size)
147{
148 char *__end = (char *)__addr + __size;
149 unsigned char __uc = (unsigned char) __c;
150
151 __asm__(".set\tpush\n\t"
152 ".set\tnoat\n\t"
153 ".set\treorder\n\t"
154 "1:\tbeq\t%0,%1,2f\n\t"
155 "addiu\t%0,1\n\t"
156 "lbu\t$1,-1(%0)\n\t"
157 "bne\t$1,%z4,1b\n"
158 "2:\t.set\tpop"
159 : "=r" (__addr), "=r" (__end)
160 : "0" (__addr), "1" (__end), "Jr" (__uc));
161
162 return __addr;
163}
164#endif /* CONFIG_32BIT */
165
166#endif /* _ASM_STRING_H */ 144#endif /* _ASM_STRING_H */
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index 1612b3fe1080..fa193f861e71 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -114,6 +114,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
114#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 114#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
115#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 115#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
116#define TIF_SECCOMP 5 /* secure computing */ 116#define TIF_SECCOMP 5 /* secure computing */
117#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
117#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 118#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
118#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 119#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
119#define TIF_MEMDIE 18 120#define TIF_MEMDIE 18
@@ -125,6 +126,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
125#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 126#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
126#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 127#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
127#define _TIF_SECCOMP (1<<TIF_SECCOMP) 128#define _TIF_SECCOMP (1<<TIF_SECCOMP)
129#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
128#define _TIF_USEDFPU (1<<TIF_USEDFPU) 130#define _TIF_USEDFPU (1<<TIF_USEDFPU)
129#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 131#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
130 132
diff --git a/include/asm-mips/tx4927/tx4927.h b/include/asm-mips/tx4927/tx4927.h
index 3bb7f0087d68..de85bd2245f7 100644
--- a/include/asm-mips/tx4927/tx4927.h
+++ b/include/asm-mips/tx4927/tx4927.h
@@ -2,7 +2,7 @@
2 * Author: MontaVista Software, Inc. 2 * Author: MontaVista Software, Inc.
3 * source@mvista.com 3 * source@mvista.com
4 * 4 *
5 * Copyright 2001-2002 MontaVista Software Inc. 5 * Copyright 2001-2006 MontaVista Software Inc.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 8 * under the terms of the GNU General Public License as published by the
@@ -30,10 +30,10 @@
30#include <asm/tx4927/tx4927_mips.h> 30#include <asm/tx4927/tx4927_mips.h>
31 31
32/* 32/*
33 This register naming came from the intergrate cpu/controoler name TX4927 33 This register naming came from the integrated CPU/controller name TX4927
34 followed by the device name from table 4.2.2 on page 4-3 and then followed 34 followed by the device name from table 4.2.2 on page 4-3 and then followed
35 by the register name from table 4.2.3 on pages 4-4 to 4-8. The manaul 35 by the register name from table 4.2.3 on pages 4-4 to 4-8. The manaul
36 used is "TMPR4927BT Preliminary Rev 0.1 20.Jul.2001". 36 used was "TMPR4927BT Preliminary Rev 0.1 20.Jul.2001".
37 */ 37 */
38 38
39#define TX4927_SIO_0_BASE 39#define TX4927_SIO_0_BASE
@@ -251,8 +251,8 @@
251 251
252/* TX4927 Timer 0 (32-bit registers) */ 252/* TX4927 Timer 0 (32-bit registers) */
253#define TX4927_TMR0_BASE 0xf000 253#define TX4927_TMR0_BASE 0xf000
254#define TX4927_TMR0_TMTCR0 0xf004 254#define TX4927_TMR0_TMTCR0 0xf000
255#define TX4927_TMR0_TMTISR0 0xf008 255#define TX4927_TMR0_TMTISR0 0xf004
256#define TX4927_TMR0_TMCPRA0 0xf008 256#define TX4927_TMR0_TMCPRA0 0xf008
257#define TX4927_TMR0_TMCPRB0 0xf00c 257#define TX4927_TMR0_TMCPRB0 0xf00c
258#define TX4927_TMR0_TMITMR0 0xf010 258#define TX4927_TMR0_TMITMR0 0xf010
@@ -264,8 +264,8 @@
264 264
265/* TX4927 Timer 1 (32-bit registers) */ 265/* TX4927 Timer 1 (32-bit registers) */
266#define TX4927_TMR1_BASE 0xf100 266#define TX4927_TMR1_BASE 0xf100
267#define TX4927_TMR1_TMTCR1 0xf104 267#define TX4927_TMR1_TMTCR1 0xf100
268#define TX4927_TMR1_TMTISR1 0xf108 268#define TX4927_TMR1_TMTISR1 0xf104
269#define TX4927_TMR1_TMCPRA1 0xf108 269#define TX4927_TMR1_TMCPRA1 0xf108
270#define TX4927_TMR1_TMCPRB1 0xf10c 270#define TX4927_TMR1_TMCPRB1 0xf10c
271#define TX4927_TMR1_TMITMR1 0xf110 271#define TX4927_TMR1_TMITMR1 0xf110
@@ -277,13 +277,12 @@
277 277
278/* TX4927 Timer 2 (32-bit registers) */ 278/* TX4927 Timer 2 (32-bit registers) */
279#define TX4927_TMR2_BASE 0xf200 279#define TX4927_TMR2_BASE 0xf200
280#define TX4927_TMR2_TMTCR2 0xf104 280#define TX4927_TMR2_TMTCR2 0xf200
281#define TX4927_TMR2_TMTISR2 0xf208 281#define TX4927_TMR2_TMTISR2 0xf204
282#define TX4927_TMR2_TMCPRA2 0xf208 282#define TX4927_TMR2_TMCPRA2 0xf208
283#define TX4927_TMR2_TMCPRB2 0xf20c
284#define TX4927_TMR2_TMITMR2 0xf210 283#define TX4927_TMR2_TMITMR2 0xf210
285#define TX4927_TMR2_TMCCDR2 0xf220 284#define TX4927_TMR2_TMCCDR2 0xf220
286#define TX4927_TMR2_TMPGMR2 0xf230 285#define TX4927_TMR2_TMWTMR2 0xf240
287#define TX4927_TMR2_TMTRR2 0xf2f0 286#define TX4927_TMR2_TMTRR2 0xf2f0
288#define TX4927_TMR2_LIMIT 0xf2ff 287#define TX4927_TMR2_LIMIT 0xf2ff
289 288
diff --git a/include/asm-mips/tx4927/tx4927_pci.h b/include/asm-mips/tx4927/tx4927_pci.h
index 165f6b8b217f..66c064690f41 100644
--- a/include/asm-mips/tx4927/tx4927_pci.h
+++ b/include/asm-mips/tx4927/tx4927_pci.h
@@ -253,6 +253,16 @@ struct tx4927_pcic_reg {
253#define TX4927_CCFG_PCIDIVMODE_5 0x00001000 253#define TX4927_CCFG_PCIDIVMODE_5 0x00001000
254#define TX4927_CCFG_PCIDIVMODE_6 0x00001800 254#define TX4927_CCFG_PCIDIVMODE_6 0x00001800
255 255
256#define TX4937_CCFG_PCIDIVMODE_MASK 0x00001c00
257#define TX4937_CCFG_PCIDIVMODE_8 0x00000000
258#define TX4937_CCFG_PCIDIVMODE_4 0x00000400
259#define TX4937_CCFG_PCIDIVMODE_9 0x00000800
260#define TX4937_CCFG_PCIDIVMODE_4_5 0x00000c00
261#define TX4937_CCFG_PCIDIVMODE_10 0x00001000
262#define TX4937_CCFG_PCIDIVMODE_5 0x00001400
263#define TX4937_CCFG_PCIDIVMODE_11 0x00001800
264#define TX4937_CCFG_PCIDIVMODE_5_5 0x00001c00
265
256/* PCFG : Pin Configuration */ 266/* PCFG : Pin Configuration */
257#define TX4927_PCFG_PCICLKEN_ALL 0x003f0000 267#define TX4927_PCFG_PCICLKEN_ALL 0x003f0000
258#define TX4927_PCFG_PCICLKEN(ch) (0x00010000<<(ch)) 268#define TX4927_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 41bb96bb2120..91d813a37823 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -202,49 +202,49 @@ struct __large_struct { unsigned long buf[100]; };
202 * Yuck. We need two variants, one for 64bit operation and one 202 * Yuck. We need two variants, one for 64bit operation and one
203 * for 32 bit mode and old iron. 203 * for 32 bit mode and old iron.
204 */ 204 */
205#ifdef __mips64 205#ifdef CONFIG_32BIT
206#define __GET_USER_DW(ptr) __get_user_asm("ld", ptr) 206#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
207#else 207#endif
208#define __GET_USER_DW(ptr) __get_user_asm_ll32(ptr) 208#ifdef CONFIG_64BIT
209#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
209#endif 210#endif
210 211
211#define __get_user_nocheck(x,ptr,size) \ 212extern void __get_user_unknown(void);
212({ \ 213
213 __typeof(*(ptr)) __gu_val = (__typeof(*(ptr))) 0; \ 214#define __get_user_common(val, size, ptr) \
214 long __gu_err = 0; \ 215do { \
215 \
216 switch (size) { \ 216 switch (size) { \
217 case 1: __get_user_asm("lb", ptr); break; \ 217 case 1: __get_user_asm(val, "lb", ptr); break; \
218 case 2: __get_user_asm("lh", ptr); break; \ 218 case 2: __get_user_asm(val, "lh", ptr); break; \
219 case 4: __get_user_asm("lw", ptr); break; \ 219 case 4: __get_user_asm(val, "lw", ptr); break; \
220 case 8: __GET_USER_DW(ptr); break; \ 220 case 8: __GET_USER_DW(val, ptr); break; \
221 default: __get_user_unknown(); break; \ 221 default: __get_user_unknown(); break; \
222 } \ 222 } \
223 (x) = (__typeof__(*(ptr))) __gu_val; \ 223} while (0)
224
225#define __get_user_nocheck(x,ptr,size) \
226({ \
227 long __gu_err; \
228 \
229 __get_user_common((x), size, ptr); \
224 __gu_err; \ 230 __gu_err; \
225}) 231})
226 232
227#define __get_user_check(x,ptr,size) \ 233#define __get_user_check(x,ptr,size) \
228({ \ 234({ \
229 const __typeof__(*(ptr)) __user * __gu_addr = (ptr); \
230 __typeof__(*(ptr)) __gu_val = 0; \
231 long __gu_err = -EFAULT; \ 235 long __gu_err = -EFAULT; \
236 const void __user * __gu_ptr = (ptr); \
237 \
238 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
239 __get_user_common((x), size, __gu_ptr); \
232 \ 240 \
233 if (likely(access_ok(VERIFY_READ, __gu_addr, size))) { \
234 switch (size) { \
235 case 1: __get_user_asm("lb", __gu_addr); break; \
236 case 2: __get_user_asm("lh", __gu_addr); break; \
237 case 4: __get_user_asm("lw", __gu_addr); break; \
238 case 8: __GET_USER_DW(__gu_addr); break; \
239 default: __get_user_unknown(); break; \
240 } \
241 } \
242 (x) = (__typeof__(*(ptr))) __gu_val; \
243 __gu_err; \ 241 __gu_err; \
244}) 242})
245 243
246#define __get_user_asm(insn, addr) \ 244#define __get_user_asm(val, insn, addr) \
247{ \ 245{ \
246 long __gu_tmp; \
247 \
248 __asm__ __volatile__( \ 248 __asm__ __volatile__( \
249 "1: " insn " %1, %3 \n" \ 249 "1: " insn " %1, %3 \n" \
250 "2: \n" \ 250 "2: \n" \
@@ -255,14 +255,16 @@ struct __large_struct { unsigned long buf[100]; };
255 " .section __ex_table,\"a\" \n" \ 255 " .section __ex_table,\"a\" \n" \
256 " "__UA_ADDR "\t1b, 3b \n" \ 256 " "__UA_ADDR "\t1b, 3b \n" \
257 " .previous \n" \ 257 " .previous \n" \
258 : "=r" (__gu_err), "=r" (__gu_val) \ 258 : "=r" (__gu_err), "=r" (__gu_tmp) \
259 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ 259 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
260 \
261 (val) = (__typeof__(val)) __gu_tmp; \
260} 262}
261 263
262/* 264/*
263 * Get a long long 64 using 32 bit registers. 265 * Get a long long 64 using 32 bit registers.
264 */ 266 */
265#define __get_user_asm_ll32(addr) \ 267#define __get_user_asm_ll32(val, addr) \
266{ \ 268{ \
267 __asm__ __volatile__( \ 269 __asm__ __volatile__( \
268 "1: lw %1, (%3) \n" \ 270 "1: lw %1, (%3) \n" \
@@ -278,21 +280,20 @@ struct __large_struct { unsigned long buf[100]; };
278 " " __UA_ADDR " 1b, 4b \n" \ 280 " " __UA_ADDR " 1b, 4b \n" \
279 " " __UA_ADDR " 2b, 4b \n" \ 281 " " __UA_ADDR " 2b, 4b \n" \
280 " .previous \n" \ 282 " .previous \n" \
281 : "=r" (__gu_err), "=&r" (__gu_val) \ 283 : "=r" (__gu_err), "=&r" (val) \
282 : "0" (0), "r" (addr), "i" (-EFAULT)); \ 284 : "0" (0), "r" (addr), "i" (-EFAULT)); \
283} 285}
284 286
285extern void __get_user_unknown(void);
286
287/* 287/*
288 * Yuck. We need two variants, one for 64bit operation and one 288 * Yuck. We need two variants, one for 64bit operation and one
289 * for 32 bit mode and old iron. 289 * for 32 bit mode and old iron.
290 */ 290 */
291#ifdef __mips64 291#ifdef CONFIG_32BIT
292#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
293#else
294#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr) 292#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
295#endif 293#endif
294#ifdef CONFIG_64BIT
295#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
296#endif
296 297
297#define __put_user_nocheck(x,ptr,size) \ 298#define __put_user_nocheck(x,ptr,size) \
298({ \ 299({ \
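
The refactor above folds the size dispatch into a single __get_user_common() helper and stores straight into the caller's lvalue instead of going through a fixed __gu_val temporary. A plain-C sketch of the sizeof-driven dispatch (illustrative only: memcpy replaces the fixup-protected lb/lh/lw/ld asm, and the macro name is made up):

#include <assert.h>
#include <string.h>

#define get_user_sketch(x, ptr)                                         \
({                                                                      \
        long __gu_err = 0;                                              \
                                                                        \
        switch (sizeof(*(ptr))) {                                       \
        case 1: case 2: case 4: case 8:                                 \
                memcpy(&(x), (ptr), sizeof(*(ptr)));                    \
                break;                                                  \
        default:                                                        \
                __gu_err = -14; /* -EFAULT; the real code calls __get_user_unknown() */ \
                break;                                                  \
        }                                                               \
        __gu_err;                                                       \
})

int main(void)
{
        int src = 42, dst = 0;

        assert(get_user_sketch(dst, &src) == 0);
        assert(dst == 42);
        return 0;
}
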
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index 89ea8b60e945..e7ff9b187783 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -307,17 +307,33 @@
307#define __NR_inotify_init (__NR_Linux + 284) 307#define __NR_inotify_init (__NR_Linux + 284)
308#define __NR_inotify_add_watch (__NR_Linux + 285) 308#define __NR_inotify_add_watch (__NR_Linux + 285)
309#define __NR_inotify_rm_watch (__NR_Linux + 286) 309#define __NR_inotify_rm_watch (__NR_Linux + 286)
310 310#define __NR_migrate_pages (__NR_Linux + 287)
311#define __NR_openat (__NR_Linux + 288)
312#define __NR_mkdirat (__NR_Linux + 289)
313#define __NR_mknodat (__NR_Linux + 290)
314#define __NR_fchownat (__NR_Linux + 291)
315#define __NR_futimesat (__NR_Linux + 292)
316#define __NR_newfstatat (__NR_Linux + 293)
317#define __NR_unlinkat (__NR_Linux + 294)
318#define __NR_renameat (__NR_Linux + 295)
319#define __NR_linkat (__NR_Linux + 296)
320#define __NR_symlinkat (__NR_Linux + 297)
321#define __NR_readlinkat (__NR_Linux + 298)
322#define __NR_fchmodat (__NR_Linux + 299)
323#define __NR_faccessat (__NR_Linux + 300)
324#define __NR_pselect6 (__NR_Linux + 301)
325#define __NR_ppoll (__NR_Linux + 302)
326#define __NR_unshare (__NR_Linux + 303)
311 327
312/* 328/*
313 * Offset of the last Linux o32 flavoured syscall 329 * Offset of the last Linux o32 flavoured syscall
314 */ 330 */
315#define __NR_Linux_syscalls 286 331#define __NR_Linux_syscalls 303
316 332
317#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 333#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
318 334
319#define __NR_O32_Linux 4000 335#define __NR_O32_Linux 4000
320#define __NR_O32_Linux_syscalls 283 336#define __NR_O32_Linux_syscalls 303
321 337
322#if _MIPS_SIM == _MIPS_SIM_ABI64 338#if _MIPS_SIM == _MIPS_SIM_ABI64
323 339
@@ -571,16 +587,33 @@
571#define __NR_inotify_init (__NR_Linux + 243) 587#define __NR_inotify_init (__NR_Linux + 243)
572#define __NR_inotify_add_watch (__NR_Linux + 244) 588#define __NR_inotify_add_watch (__NR_Linux + 244)
573#define __NR_inotify_rm_watch (__NR_Linux + 245) 589#define __NR_inotify_rm_watch (__NR_Linux + 245)
590#define __NR_migrate_pages (__NR_Linux + 246)
591#define __NR_openat (__NR_Linux + 247)
592#define __NR_mkdirat (__NR_Linux + 248)
593#define __NR_mknodat (__NR_Linux + 249)
594#define __NR_fchownat (__NR_Linux + 250)
595#define __NR_futimesat (__NR_Linux + 251)
596#define __NR_newfstatat (__NR_Linux + 252)
597#define __NR_unlinkat (__NR_Linux + 253)
598#define __NR_renameat (__NR_Linux + 254)
599#define __NR_linkat (__NR_Linux + 255)
600#define __NR_symlinkat (__NR_Linux + 256)
601#define __NR_readlinkat (__NR_Linux + 257)
602#define __NR_fchmodat (__NR_Linux + 258)
603#define __NR_faccessat (__NR_Linux + 259)
604#define __NR_pselect6 (__NR_Linux + 260)
605#define __NR_ppoll (__NR_Linux + 261)
606#define __NR_unshare (__NR_Linux + 262)
574 607
575/* 608/*
576 * Offset of the last Linux 64-bit flavoured syscall 609 * Offset of the last Linux 64-bit flavoured syscall
577 */ 610 */
578#define __NR_Linux_syscalls 245 611#define __NR_Linux_syscalls 262
579 612
580#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 613#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
581 614
582#define __NR_64_Linux 5000 615#define __NR_64_Linux 5000
583#define __NR_64_Linux_syscalls 242 616#define __NR_64_Linux_syscalls 262
584 617
585#if _MIPS_SIM == _MIPS_SIM_NABI32 618#if _MIPS_SIM == _MIPS_SIM_NABI32
586 619
@@ -838,16 +871,33 @@
838#define __NR_inotify_init (__NR_Linux + 247) 871#define __NR_inotify_init (__NR_Linux + 247)
839#define __NR_inotify_add_watch (__NR_Linux + 248) 872#define __NR_inotify_add_watch (__NR_Linux + 248)
840#define __NR_inotify_rm_watch (__NR_Linux + 249) 873#define __NR_inotify_rm_watch (__NR_Linux + 249)
874#define __NR_migrate_pages (__NR_Linux + 250)
875#define __NR_openat (__NR_Linux + 251)
876#define __NR_mkdirat (__NR_Linux + 252)
877#define __NR_mknodat (__NR_Linux + 253)
878#define __NR_fchownat (__NR_Linux + 254)
879#define __NR_futimesat (__NR_Linux + 255)
880#define __NR_newfstatat (__NR_Linux + 256)
881#define __NR_unlinkat (__NR_Linux + 257)
882#define __NR_renameat (__NR_Linux + 258)
883#define __NR_linkat (__NR_Linux + 259)
884#define __NR_symlinkat (__NR_Linux + 260)
885#define __NR_readlinkat (__NR_Linux + 261)
886#define __NR_fchmodat (__NR_Linux + 262)
887#define __NR_faccessat (__NR_Linux + 263)
888#define __NR_pselect6 (__NR_Linux + 264)
889#define __NR_ppoll (__NR_Linux + 265)
890#define __NR_unshare (__NR_Linux + 266)
841 891
842/* 892/*
843 * Offset of the last N32 flavoured syscall 893 * Offset of the last N32 flavoured syscall
844 */ 894 */
845#define __NR_Linux_syscalls 249 895#define __NR_Linux_syscalls 266
846 896
847#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 897#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
848 898
849#define __NR_N32_Linux 6000 899#define __NR_N32_Linux 6000
850#define __NR_N32_Linux_syscalls 246 900#define __NR_N32_Linux_syscalls 266
851 901
852#ifndef __ASSEMBLY__ 902#ifndef __ASSEMBLY__
853 903
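
Each ABI block above defines the new *at(), pselect6, ppoll and unshare numbers relative to its own __NR_Linux base (4000 for o32, 5000 for 64-bit, 6000 for N32), so the same syscall gets a different absolute number per ABI. A small sketch computing the absolute __NR_openat values implied by this diff (macro names are illustrative):

#include <stdio.h>

#define NR_LINUX_O32    4000
#define NR_LINUX_64     5000
#define NR_LINUX_N32    6000

int main(void)
{
        printf("o32 __NR_openat = %d\n", NR_LINUX_O32 + 288);   /* 4288 */
        printf("n64 __NR_openat = %d\n", NR_LINUX_64  + 247);   /* 5247 */
        printf("n32 __NR_openat = %d\n", NR_LINUX_N32 + 251);   /* 6251 */
        return 0;
}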