Diffstat (limited to 'include')
31 files changed, 711 insertions, 352 deletions
diff --git a/include/asm-blackfin/mach-bf561/cdefBF561.h b/include/asm-blackfin/mach-bf561/cdefBF561.h
index 6e87ab269ffe..73d4d65249cd 100644
--- a/include/asm-blackfin/mach-bf561/cdefBF561.h
+++ b/include/asm-blackfin/mach-bf561/cdefBF561.h
@@ -83,9 +83,9 @@ static __inline__ void bfin_write_VR_CTL(unsigned int val) | |||
83 | 83 | ||
84 | /* For MMR's that are reserved on Core B, set up defines to better integrate with other ports */ | 84 | /* For MMR's that are reserved on Core B, set up defines to better integrate with other ports */ |
85 | #define bfin_read_SWRST() bfin_read_SICA_SWRST() | 85 | #define bfin_read_SWRST() bfin_read_SICA_SWRST() |
86 | #define bfin_write_SWRST() bfin_write_SICA_SWRST() | 86 | #define bfin_write_SWRST(val) bfin_write_SICA_SWRST(val) |
87 | #define bfin_read_SYSCR() bfin_read_SICA_SYSCR() | 87 | #define bfin_read_SYSCR() bfin_read_SICA_SYSCR() |
88 | #define bfin_write_SYSCR() bfin_write_SICA_SYSCR() | 88 | #define bfin_write_SYSCR(val) bfin_write_SICA_SYSCR(val) |
89 | 89 | ||
90 | /* System Reset and Interrupt Controller registers for core A (0xFFC0 0100-0xFFC0 01FF) */ | 90 | /* System Reset and Interrupt Controller registers for core A (0xFFC0 0100-0xFFC0 01FF) */ |
91 | #define bfin_read_SICA_SWRST() bfin_read16(SICA_SWRST) | 91 | #define bfin_read_SICA_SWRST() bfin_read16(SICA_SWRST) |
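The point of this hunk is easier to see with the macros expanded: the old zero-parameter form could not forward a value at all (a call with an argument would not even preprocess), while the new form passes it through to the 16-bit MMR write. A minimal sketch, using simplified helpers and a hypothetical register address purely for illustration:

/* Illustration only -- simplified macros, hypothetical MMR address */
#define bfin_write16(addr, val)  (*(volatile unsigned short *)(addr) = (val))
#define SICA_SWRST               0xFFC00100   /* hypothetical address */
#define bfin_write_SICA_SWRST(v) bfin_write16(SICA_SWRST, v)

/* old: #define bfin_write_SWRST()     bfin_write_SICA_SWRST()   -- drops the value */
/* new: forwards the value to the 16-bit MMR write                                  */
#define bfin_write_SWRST(val)    bfin_write_SICA_SWRST(val)

void example_soft_reset(void)
{
        bfin_write_SWRST(0x7);   /* expands to bfin_write16(SICA_SWRST, 0x7) */
}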
diff --git a/include/asm-blackfin/string.h b/include/asm-blackfin/string.h
index 6f1eb7d6d3cb..e8ada91ab002 100644
--- a/include/asm-blackfin/string.h
+++ b/include/asm-blackfin/string.h
@@ -9,13 +9,16 @@ extern inline char *strcpy(char *dest, const char *src) | |||
9 | char *xdest = dest; | 9 | char *xdest = dest; |
10 | char temp = 0; | 10 | char temp = 0; |
11 | 11 | ||
12 | __asm__ __volatile__ | 12 | __asm__ __volatile__ ( |
13 | ("1:\t%2 = B [%1++] (Z);\n\t" | 13 | "1:" |
14 | "B [%0++] = %2;\n\t" | 14 | "%2 = B [%1++] (Z);" |
15 | "CC = %2;\n\t" | 15 | "B [%0++] = %2;" |
16 | "if cc jump 1b (bp);\n" | 16 | "CC = %2;" |
17 | : "+&a" (dest), "+&a" (src), "=&d" (temp) | 17 | "if cc jump 1b (bp);" |
18 | ::"memory", "CC"); | 18 | : "+&a" (dest), "+&a" (src), "=&d" (temp) |
19 | : | ||
20 | : "memory", "CC"); | ||
21 | |||
19 | return xdest; | 22 | return xdest; |
20 | } | 23 | } |
21 | 24 | ||
@@ -28,37 +31,56 @@ extern inline char *strncpy(char *dest, const char *src, size_t n) | |||
28 | if (n == 0) | 31 | if (n == 0) |
29 | return xdest; | 32 | return xdest; |
30 | 33 | ||
31 | __asm__ __volatile__ | 34 | __asm__ __volatile__ ( |
32 | ("1:\t%3 = B [%1++] (Z);\n\t" | 35 | "1:" |
33 | "B [%0++] = %3;\n\t" | 36 | "%3 = B [%1++] (Z);" |
34 | "CC = %3;\n\t" | 37 | "B [%0++] = %3;" |
35 | "if ! cc jump 2f;\n\t" | 38 | "CC = %3;" |
36 | "%2 += -1;\n\t" | 39 | "if ! cc jump 2f;" |
37 | "CC = %2 == 0;\n\t" | 40 | "%2 += -1;" |
38 | "if ! cc jump 1b (bp);\n" | 41 | "CC = %2 == 0;" |
39 | "2:\n" | 42 | "if ! cc jump 1b (bp);" |
40 | : "+&a" (dest), "+&a" (src), "+&da" (n), "=&d" (temp) | 43 | "jump 4f;" |
41 | ::"memory", "CC"); | 44 | "2:" |
45 | /* if src is shorter than n, we need to null pad bytes now */ | ||
46 | "%3 = 0;" | ||
47 | "3:" | ||
48 | "%2 += -1;" | ||
49 | "CC = %2 == 0;" | ||
50 | "if cc jump 4f;" | ||
51 | "B [%0++] = %3;" | ||
52 | "jump 3b;" | ||
53 | "4:" | ||
54 | : "+&a" (dest), "+&a" (src), "+&da" (n), "=&d" (temp) | ||
55 | : | ||
56 | : "memory", "CC"); | ||
57 | |||
42 | return xdest; | 58 | return xdest; |
43 | } | 59 | } |
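The rewritten strncpy() adds the null padding that ISO C requires when the source string is shorter than n bytes; the old loop simply stopped at the terminator. A plain C sketch of the behaviour the new assembly implements -- illustration only, not the kernel code:

#include <stddef.h>

/* Copy until the NUL is copied or n bytes are written, then pad the
 * remainder (if any) with NUL bytes, exactly n bytes total. */
static char *strncpy_sketch(char *dest, const char *src, size_t n)
{
        char *xdest = dest;

        while (n) {
                n--;
                if ((*dest++ = *src++) == '\0')
                        break;
        }
        while (n--)
                *dest++ = '\0';
        return xdest;
}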
44 | 60 | ||
45 | #define __HAVE_ARCH_STRCMP | 61 | #define __HAVE_ARCH_STRCMP |
46 | extern inline int strcmp(const char *cs, const char *ct) | 62 | extern inline int strcmp(const char *cs, const char *ct) |
47 | { | 63 | { |
48 | char __res1, __res2; | 64 | /* need to use int's here so the char's in the assembly don't get |
49 | 65 | * sign extended incorrectly when we don't want them to be | |
50 | __asm__ | 66 | */ |
51 | ("1:\t%2 = B[%0++] (Z);\n\t" /* get *cs */ | 67 | int __res1, __res2; |
52 | "%3 = B[%1++] (Z);\n\t" /* get *ct */ | 68 | |
53 | "CC = %2 == %3;\n\t" /* compare a byte */ | 69 | __asm__ __volatile__ ( |
54 | "if ! cc jump 2f;\n\t" /* not equal, break out */ | 70 | "1:" |
55 | "CC = %2;\n\t" /* at end of cs? */ | 71 | "%2 = B[%0++] (Z);" /* get *cs */ |
56 | "if cc jump 1b (bp);\n\t" /* no, keep going */ | 72 | "%3 = B[%1++] (Z);" /* get *ct */ |
57 | "jump.s 3f;\n" /* strings are equal */ | 73 | "CC = %2 == %3;" /* compare a byte */ |
58 | "2:\t%2 = %2 - %3;\n" /* *cs - *ct */ | 74 | "if ! cc jump 2f;" /* not equal, break out */ |
59 | "3:\n" | 75 | "CC = %2;" /* at end of cs? */ |
60 | : "+&a" (cs), "+&a" (ct), "=&d" (__res1), "=&d" (__res2) | 76 | "if cc jump 1b (bp);" /* no, keep going */ |
61 | : : "CC"); | 77 | "jump.s 3f;" /* strings are equal */ |
78 | "2:" | ||
79 | "%2 = %2 - %3;" /* *cs - *ct */ | ||
80 | "3:" | ||
81 | : "+&a" (cs), "+&a" (ct), "=&d" (__res1), "=&d" (__res2) | ||
82 | : | ||
83 | : "memory", "CC"); | ||
62 | 84 | ||
63 | return __res1; | 85 | return __res1; |
64 | } | 86 | } |
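The reason for switching __res1/__res2 from char to int: the (Z) loads are zero-extended, so a per-byte difference can be as large as 255, and squeezing that result back through a signed 8-bit variable can flip the sign of the return value. A small user-space demonstration, assuming plain char is signed as it is on most targets (the same reasoning applies to the strncmp() change below):

#include <stdio.h>

int main(void)
{
        unsigned int a = 0xff, b = 0x01;  /* the two bytes, as the (Z) loads see them */
        int  wide   = (int)(a - b);       /* 254: "\xff" compares greater -- correct  */
        char narrow = (char)(a - b);      /* 0xfe -> -2: wrong sign after truncation  */

        printf("wide=%d narrow=%d\n", wide, narrow);
        return 0;
}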
@@ -66,26 +88,35 @@ extern inline int strcmp(const char *cs, const char *ct) | |||
66 | #define __HAVE_ARCH_STRNCMP | 88 | #define __HAVE_ARCH_STRNCMP |
67 | extern inline int strncmp(const char *cs, const char *ct, size_t count) | 89 | extern inline int strncmp(const char *cs, const char *ct, size_t count) |
68 | { | 90 | { |
69 | char __res1, __res2; | 91 | /* need to use int's here so the char's in the assembly don't get |
92 | * sign extended incorrectly when we don't want them to be | ||
93 | */ | ||
94 | int __res1, __res2; | ||
70 | 95 | ||
71 | if (!count) | 96 | if (!count) |
72 | return 0; | 97 | return 0; |
73 | __asm__ | 98 | |
74 | ("1:\t%3 = B[%0++] (Z);\n\t" /* get *cs */ | 99 | __asm__ __volatile__ ( |
75 | "%4 = B[%1++] (Z);\n\t" /* get *ct */ | 100 | "1:" |
76 | "CC = %3 == %4;\n\t" /* compare a byte */ | 101 | "%3 = B[%0++] (Z);" /* get *cs */ |
77 | "if ! cc jump 3f;\n\t" /* not equal, break out */ | 102 | "%4 = B[%1++] (Z);" /* get *ct */ |
78 | "CC = %3;\n\t" /* at end of cs? */ | 103 | "CC = %3 == %4;" /* compare a byte */ |
79 | "if ! cc jump 4f;\n\t" /* yes, all done */ | 104 | "if ! cc jump 3f;" /* not equal, break out */ |
80 | "%2 += -1;\n\t" /* no, adjust count */ | 105 | "CC = %3;" /* at end of cs? */ |
81 | "CC = %2 == 0;\n\t" | 106 | "if ! cc jump 4f;" /* yes, all done */ |
82 | "if ! cc jump 1b;\n" /* more to do, keep going */ | 107 | "%2 += -1;" /* no, adjust count */ |
83 | "2:\t%3 = 0;\n\t" /* strings are equal */ | 108 | "CC = %2 == 0;" |
84 | "jump.s 4f;\n" | 109 | "if ! cc jump 1b;" /* more to do, keep going */ |
85 | "3:\t%3 = %3 - %4;\n" /* *cs - *ct */ | 110 | "2:" |
86 | "4:" | 111 | "%3 = 0;" /* strings are equal */ |
87 | : "+&a" (cs), "+&a" (ct), "+&da" (count), "=&d" (__res1), "=&d" (__res2) | 112 | "jump.s 4f;" |
88 | : : "CC"); | 113 | "3:" |
114 | "%3 = %3 - %4;" /* *cs - *ct */ | ||
115 | "4:" | ||
116 | : "+&a" (cs), "+&a" (ct), "+&da" (count), "=&d" (__res1), "=&d" (__res2) | ||
117 | : | ||
118 | : "memory", "CC"); | ||
119 | |||
89 | return __res1; | 120 | return __res1; |
90 | } | 121 | } |
91 | 122 | ||
diff --git a/include/asm-mips/compiler.h b/include/asm-mips/compiler.h
index 169ae26105e9..aa6b876bbd78 100644
--- a/include/asm-mips/compiler.h
+++ b/include/asm-mips/compiler.h
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2004 Maciej W. Rozycki | 2 | * Copyright (C) 2004, 2007 Maciej W. Rozycki |
3 | * | 3 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 4 | * This file is subject to the terms and conditions of the GNU General Public |
5 | * License. See the file "COPYING" in the main directory of this archive | 5 | * License. See the file "COPYING" in the main directory of this archive |
@@ -9,8 +9,10 @@ | |||
9 | #define _ASM_COMPILER_H | 9 | #define _ASM_COMPILER_H |
10 | 10 | ||
11 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) | 11 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) |
12 | #define GCC_IMM_ASM "n" | ||
12 | #define GCC_REG_ACCUM "$0" | 13 | #define GCC_REG_ACCUM "$0" |
13 | #else | 14 | #else |
15 | #define GCC_IMM_ASM "rn" | ||
14 | #define GCC_REG_ACCUM "accum" | 16 | #define GCC_REG_ACCUM "accum" |
15 | #endif | 17 | #endif |
16 | 18 | ||
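GCC_IMM_ASM is a constraint string for inline-assembly operands that are meant to be immediates: plain "n" on gcc >= 3.4, and "rn" (register or immediate) on older compilers that could not always prove the operand constant. A hypothetical use, made up purely to show where the constraint string goes -- the operand is only referenced in an assembler comment, so it stays valid whichever form gcc picks:

#include <asm/compiler.h>

/* purely illustrative: note_value_demo() is not a real kernel macro */
#define note_value_demo(val)                                    \
        __asm__ __volatile__("# value: %0"                      \
                             : /* no outputs */                 \
                             : GCC_IMM_ASM (val))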
diff --git a/include/asm-mips/mach-generic/ide.h b/include/asm-mips/mach-generic/ide.h
index 2b928577be5d..a77128362a7d 100644
--- a/include/asm-mips/mach-generic/ide.h
+++ b/include/asm-mips/mach-generic/ide.h
@@ -29,6 +29,35 @@ | |||
29 | 29 | ||
30 | #define IDE_ARCH_OBSOLETE_DEFAULTS | 30 | #define IDE_ARCH_OBSOLETE_DEFAULTS |
31 | 31 | ||
32 | static __inline__ int ide_probe_legacy(void) | ||
33 | { | ||
34 | #ifdef CONFIG_PCI | ||
35 | struct pci_dev *dev; | ||
36 | /* | ||
37 | * This can be called on the ide_setup() path, super-early in | ||
38 | * boot. But the down_read() will enable local interrupts, | ||
39 | * which can cause some machines to crash. So here we detect | ||
40 | * and flag that situation and bail out early. | ||
41 | */ | ||
42 | if (no_pci_devices()) | ||
43 | return 0; | ||
44 | dev = pci_get_class(PCI_CLASS_BRIDGE_EISA << 8, NULL); | ||
45 | if (dev) | ||
46 | goto found; | ||
47 | dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); | ||
48 | if (dev) | ||
49 | goto found; | ||
50 | return 0; | ||
51 | found: | ||
52 | pci_dev_put(dev); | ||
53 | return 1; | ||
54 | #elif defined(CONFIG_EISA) || defined(CONFIG_ISA) | ||
55 | return 1; | ||
56 | #else | ||
57 | return 0; | ||
58 | #endif | ||
59 | } | ||
60 | |||
32 | static __inline__ int ide_default_irq(unsigned long base) | 61 | static __inline__ int ide_default_irq(unsigned long base) |
33 | { | 62 | { |
34 | switch (base) { | 63 | switch (base) { |
@@ -45,6 +74,8 @@ static __inline__ int ide_default_irq(unsigned long base) | |||
45 | 74 | ||
46 | static __inline__ unsigned long ide_default_io_base(int index) | 75 | static __inline__ unsigned long ide_default_io_base(int index) |
47 | { | 76 | { |
77 | if (!ide_probe_legacy()) | ||
78 | return 0; | ||
48 | /* | 79 | /* |
49 | * If PCI is present then it is not safe to poke around | 80 | * If PCI is present then it is not safe to poke around |
50 | * the other legacy IDE ports. Only 0x1f0 and 0x170 are | 81 | * the other legacy IDE ports. Only 0x1f0 and 0x170 are |
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index fdc271ebe41c..fa331dad97cb 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -149,6 +149,11 @@ static inline u64 get_tb(void) | |||
149 | } | 149 | } |
150 | #endif /* !CONFIG_PPC64 */ | 150 | #endif /* !CONFIG_PPC64 */ |
151 | 151 | ||
152 | static inline u64 get_tb_or_rtc(void) | ||
153 | { | ||
154 | return __USE_RTC() ? get_rtc() : get_tb(); | ||
155 | } | ||
156 | |||
152 | static inline void set_tb(unsigned int upper, unsigned int lower) | 157 | static inline void set_tb(unsigned int upper, unsigned int lower) |
153 | { | 158 | { |
154 | mtspr(SPRN_TBWL, 0); | 159 | mtspr(SPRN_TBWL, 0); |
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 86dc5c018a19..55c5bb27e4da 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -297,11 +297,7 @@ extern void prom_sun4v_guest_soft_state(void); | |||
297 | extern int prom_ihandle2path(int handle, char *buffer, int bufsize); | 297 | extern int prom_ihandle2path(int handle, char *buffer, int bufsize); |
298 | 298 | ||
299 | /* Client interface level routines. */ | 299 | /* Client interface level routines. */ |
300 | extern void prom_set_trap_table(unsigned long tba); | ||
301 | extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa); | ||
302 | |||
303 | extern long p1275_cmd(const char *, long, ...); | 300 | extern long p1275_cmd(const char *, long, ...); |
304 | |||
305 | 301 | ||
306 | #if 0 | 302 | #if 0 |
307 | #define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x)) | 303 | #define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x)) |
diff --git a/include/asm-xtensa/bugs.h b/include/asm-xtensa/bugs.h
index c42285320133..69b29d198249 100644
--- a/include/asm-xtensa/bugs.h
+++ b/include/asm-xtensa/bugs.h
@@ -13,10 +13,6 @@ | |||
13 | #ifndef _XTENSA_BUGS_H | 13 | #ifndef _XTENSA_BUGS_H |
14 | #define _XTENSA_BUGS_H | 14 | #define _XTENSA_BUGS_H |
15 | 15 | ||
16 | #include <asm/processor.h> | 16 | static void check_bugs(void) { } |
17 | |||
18 | static void __init check_bugs(void) | ||
19 | { | ||
20 | } | ||
21 | 17 | ||
22 | #endif /* _XTENSA_BUGS_H */ | 18 | #endif /* _XTENSA_BUGS_H */ |
diff --git a/include/asm-xtensa/cache.h b/include/asm-xtensa/cache.h
index 1c4a78f29ae2..3bba2a540cf0 100644
--- a/include/asm-xtensa/cache.h
+++ b/include/asm-xtensa/cache.h
@@ -19,6 +19,15 @@ | |||
19 | 19 | ||
20 | #define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS) | 20 | #define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS) |
21 | #define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS) | 21 | #define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS) |
22 | #define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH) | ||
23 | #define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH) | ||
24 | |||
25 | /* Maximum cache size per way. */ | ||
26 | #if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE | ||
27 | # define CACHE_WAY_SIZE DCACHE_WAY_SIZE | ||
28 | #else | ||
29 | # define CACHE_WAY_SIZE ICACHE_WAY_SIZE | ||
30 | #endif | ||
22 | 31 | ||
23 | 32 | ||
24 | #endif /* _XTENSA_CACHE_H */ | 33 | #endif /* _XTENSA_CACHE_H */ |
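A worked example may help here; the numbers assume a hypothetical 16 KB, 2-way data cache with 32-byte lines, not any particular core:

/* Hypothetical configuration values */
#define XCHAL_DCACHE_SIZE       16384
#define XCHAL_DCACHE_WAYS       2
#define XCHAL_DCACHE_LINEWIDTH  5          /* 32-byte lines               */
#define XCHAL_DCACHE_SETWIDTH   8          /* 8192 / 32 = 256 sets        */

/* DCACHE_WAY_SIZE  = 16384 / 2 = 8192 bytes                              */
/* DCACHE_WAY_SHIFT = 8 + 5     = 13  -> 1 << 13 = 8192, consistent       */
/* With 4 KB pages, DCACHE_WAY_SIZE > PAGE_SIZE, so the cache-aliasing
 * handling elsewhere in this series comes into play.                     */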
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index 22ef901b7845..b773c57e75a5 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -5,7 +5,7 @@ | |||
5 | * License. See the file "COPYING" in the main directory of this archive | 5 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 6 | * for more details. |
7 | * | 7 | * |
8 | * (C) 2001 - 2006 Tensilica Inc. | 8 | * (C) 2001 - 2007 Tensilica Inc. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _XTENSA_CACHEFLUSH_H | 11 | #ifndef _XTENSA_CACHEFLUSH_H |
@@ -18,10 +18,7 @@ | |||
18 | #include <asm/page.h> | 18 | #include <asm/page.h> |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * flush and invalidate data cache, invalidate instruction cache: | 21 | * Low-level routines for cache flushing. |
22 | * | ||
23 | * __flush_invalidate_cache_all() | ||
24 | * __flush_invalidate_cache_range(from,sze) | ||
25 | * | 22 | * |
26 | * invalidate data or instruction cache: | 23 | * invalidate data or instruction cache: |
27 | * | 24 | * |
@@ -40,26 +37,39 @@ | |||
40 | * __flush_invalidate_dcache_all() | 37 | * __flush_invalidate_dcache_all() |
41 | * __flush_invalidate_dcache_page(adr) | 38 | * __flush_invalidate_dcache_page(adr) |
42 | * __flush_invalidate_dcache_range(from,size) | 39 | * __flush_invalidate_dcache_range(from,size) |
40 | * | ||
41 | * specials for cache aliasing: | ||
42 | * | ||
43 | * __flush_invalidate_dcache_page_alias(vaddr,paddr) | ||
44 | * __invalidate_icache_page_alias(vaddr,paddr) | ||
43 | */ | 45 | */ |
44 | 46 | ||
45 | extern void __flush_invalidate_cache_all(void); | 47 | extern void __invalidate_dcache_all(void); |
46 | extern void __flush_invalidate_cache_range(unsigned long, unsigned long); | ||
47 | extern void __flush_invalidate_dcache_all(void); | ||
48 | extern void __invalidate_icache_all(void); | 48 | extern void __invalidate_icache_all(void); |
49 | |||
50 | extern void __invalidate_dcache_page(unsigned long); | 49 | extern void __invalidate_dcache_page(unsigned long); |
51 | extern void __invalidate_icache_page(unsigned long); | 50 | extern void __invalidate_icache_page(unsigned long); |
52 | extern void __invalidate_icache_range(unsigned long, unsigned long); | 51 | extern void __invalidate_icache_range(unsigned long, unsigned long); |
53 | extern void __invalidate_dcache_range(unsigned long, unsigned long); | 52 | extern void __invalidate_dcache_range(unsigned long, unsigned long); |
54 | 53 | ||
54 | |||
55 | #if XCHAL_DCACHE_IS_WRITEBACK | 55 | #if XCHAL_DCACHE_IS_WRITEBACK |
56 | extern void __flush_invalidate_dcache_all(void); | ||
56 | extern void __flush_dcache_page(unsigned long); | 57 | extern void __flush_dcache_page(unsigned long); |
58 | extern void __flush_dcache_range(unsigned long, unsigned long); | ||
57 | extern void __flush_invalidate_dcache_page(unsigned long); | 59 | extern void __flush_invalidate_dcache_page(unsigned long); |
58 | extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); | 60 | extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); |
59 | #else | 61 | #else |
60 | # define __flush_dcache_page(p) do { } while(0) | 62 | # define __flush_dcache_range(p,s) do { } while(0) |
61 | # define __flush_invalidate_dcache_page(p) do { } while(0) | 63 | # define __flush_dcache_page(p) do { } while(0) |
62 | # define __flush_invalidate_dcache_range(p,s) do { } while(0) | 64 | # define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p) |
65 | # define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s) | ||
66 | #endif | ||
67 | |||
68 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | ||
69 | extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long); | ||
70 | #endif | ||
71 | #if (ICACHE_WAY_SIZE > PAGE_SIZE) | ||
72 | extern void __invalidate_icache_page_alias(unsigned long, unsigned long); | ||
63 | #endif | 73 | #endif |
64 | 74 | ||
65 | /* | 75 | /* |
@@ -71,17 +81,21 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); | |||
71 | * (see also Documentation/cachetlb.txt) | 81 | * (see also Documentation/cachetlb.txt) |
72 | */ | 82 | */ |
73 | 83 | ||
74 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | 84 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) |
75 | 85 | ||
76 | #define flush_cache_all() __flush_invalidate_cache_all(); | 86 | #define flush_cache_all() \ |
77 | #define flush_cache_mm(mm) __flush_invalidate_cache_all(); | 87 | do { \ |
78 | #define flush_cache_dup_mm(mm) __flush_invalidate_cache_all(); | 88 | __flush_invalidate_dcache_all(); \ |
89 | __invalidate_icache_all(); \ | ||
90 | } while (0) | ||
79 | 91 | ||
80 | #define flush_cache_vmap(start,end) __flush_invalidate_cache_all(); | 92 | #define flush_cache_mm(mm) flush_cache_all() |
81 | #define flush_cache_vunmap(start,end) __flush_invalidate_cache_all(); | 93 | #define flush_cache_dup_mm(mm) flush_cache_mm(mm) |
82 | 94 | ||
83 | extern void flush_dcache_page(struct page*); | 95 | #define flush_cache_vmap(start,end) flush_cache_all() |
96 | #define flush_cache_vunmap(start,end) flush_cache_all() | ||
84 | 97 | ||
98 | extern void flush_dcache_page(struct page*); | ||
85 | extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); | 99 | extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); |
86 | extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); | 100 | extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); |
87 | 101 | ||
@@ -101,24 +115,39 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon | |||
101 | 115 | ||
102 | #endif | 116 | #endif |
103 | 117 | ||
118 | /* Ensure consistency between data and instruction cache. */ | ||
104 | #define flush_icache_range(start,end) \ | 119 | #define flush_icache_range(start,end) \ |
105 | __invalidate_icache_range(start,(end)-(start)) | 120 | do { \ |
121 | __flush_dcache_range(start, (end) - (start)); \ | ||
122 | __invalidate_icache_range(start,(end) - (start)); \ | ||
123 | } while (0) | ||
106 | 124 | ||
107 | /* This is not required, see Documentation/cachetlb.txt */ | 125 | /* This is not required, see Documentation/cachetlb.txt */ |
108 | 126 | #define flush_icache_page(vma,page) do { } while (0) | |
109 | #define flush_icache_page(vma,page) do { } while(0) | ||
110 | 127 | ||
111 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 128 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
112 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 129 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
113 | 130 | ||
131 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | ||
114 | 132 | ||
115 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 133 | extern void copy_to_user_page(struct vm_area_struct*, struct page*, |
116 | memcpy(dst, src, len) | 134 | unsigned long, void*, const void*, unsigned long); |
135 | extern void copy_from_user_page(struct vm_area_struct*, struct page*, | ||
136 | unsigned long, void*, const void*, unsigned long); | ||
137 | |||
138 | #else | ||
139 | |||
140 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | ||
141 | do { \ | ||
142 | memcpy(dst, src, len); \ | ||
143 | __flush_dcache_range((unsigned long) dst, len); \ | ||
144 | __invalidate_icache_range((unsigned long) dst, len); \ | ||
145 | } while (0) | ||
117 | 146 | ||
118 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | 147 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
119 | memcpy(dst, src, len) | 148 | memcpy(dst, src, len) |
120 | 149 | ||
121 | #endif /* __KERNEL__ */ | 150 | #endif |
122 | 151 | ||
152 | #endif /* __KERNEL__ */ | ||
123 | #endif /* _XTENSA_CACHEFLUSH_H */ | 153 | #endif /* _XTENSA_CACHEFLUSH_H */ |
124 | |||
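The reworked flush_icache_range() now writes the data cache back before invalidating the instruction cache, which is what code that generates or copies instructions at run time (module loading, for instance) relies on. A minimal kernel-style sketch of that pattern; install_code() is a made-up name for illustration:

#include <linux/string.h>
#include <asm/cacheflush.h>

static void install_code(void *dst, const void *src, unsigned long len)
{
        memcpy(dst, src, len);          /* new instructions land in the D-cache */
        flush_icache_range((unsigned long)dst,
                           (unsigned long)dst + len);
                                        /* write back D-cache, then discard
                                         * stale I-cache lines before running  */
}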
diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h
index 1569b53cec91..7083d46766a8 100644
--- a/include/asm-xtensa/elf.h
+++ b/include/asm-xtensa/elf.h
@@ -20,6 +20,56 @@ | |||
20 | #define EM_XTENSA 94 | 20 | #define EM_XTENSA 94 |
21 | #define EM_XTENSA_OLD 0xABC7 | 21 | #define EM_XTENSA_OLD 0xABC7 |
22 | 22 | ||
23 | /* Xtensa relocations defined by the ABIs */ | ||
24 | |||
25 | #define R_XTENSA_NONE 0 | ||
26 | #define R_XTENSA_32 1 | ||
27 | #define R_XTENSA_RTLD 2 | ||
28 | #define R_XTENSA_GLOB_DAT 3 | ||
29 | #define R_XTENSA_JMP_SLOT 4 | ||
30 | #define R_XTENSA_RELATIVE 5 | ||
31 | #define R_XTENSA_PLT 6 | ||
32 | #define R_XTENSA_OP0 8 | ||
33 | #define R_XTENSA_OP1 9 | ||
34 | #define R_XTENSA_OP2 10 | ||
35 | #define R_XTENSA_ASM_EXPAND 11 | ||
36 | #define R_XTENSA_ASM_SIMPLIFY 12 | ||
37 | #define R_XTENSA_GNU_VTINHERIT 15 | ||
38 | #define R_XTENSA_GNU_VTENTRY 16 | ||
39 | #define R_XTENSA_DIFF8 17 | ||
40 | #define R_XTENSA_DIFF16 18 | ||
41 | #define R_XTENSA_DIFF32 19 | ||
42 | #define R_XTENSA_SLOT0_OP 20 | ||
43 | #define R_XTENSA_SLOT1_OP 21 | ||
44 | #define R_XTENSA_SLOT2_OP 22 | ||
45 | #define R_XTENSA_SLOT3_OP 23 | ||
46 | #define R_XTENSA_SLOT4_OP 24 | ||
47 | #define R_XTENSA_SLOT5_OP 25 | ||
48 | #define R_XTENSA_SLOT6_OP 26 | ||
49 | #define R_XTENSA_SLOT7_OP 27 | ||
50 | #define R_XTENSA_SLOT8_OP 28 | ||
51 | #define R_XTENSA_SLOT9_OP 29 | ||
52 | #define R_XTENSA_SLOT10_OP 30 | ||
53 | #define R_XTENSA_SLOT11_OP 31 | ||
54 | #define R_XTENSA_SLOT12_OP 32 | ||
55 | #define R_XTENSA_SLOT13_OP 33 | ||
56 | #define R_XTENSA_SLOT14_OP 34 | ||
57 | #define R_XTENSA_SLOT0_ALT 35 | ||
58 | #define R_XTENSA_SLOT1_ALT 36 | ||
59 | #define R_XTENSA_SLOT2_ALT 37 | ||
60 | #define R_XTENSA_SLOT3_ALT 38 | ||
61 | #define R_XTENSA_SLOT4_ALT 39 | ||
62 | #define R_XTENSA_SLOT5_ALT 40 | ||
63 | #define R_XTENSA_SLOT6_ALT 41 | ||
64 | #define R_XTENSA_SLOT7_ALT 42 | ||
65 | #define R_XTENSA_SLOT8_ALT 43 | ||
66 | #define R_XTENSA_SLOT9_ALT 44 | ||
67 | #define R_XTENSA_SLOT10_ALT 45 | ||
68 | #define R_XTENSA_SLOT11_ALT 46 | ||
69 | #define R_XTENSA_SLOT12_ALT 47 | ||
70 | #define R_XTENSA_SLOT13_ALT 48 | ||
71 | #define R_XTENSA_SLOT14_ALT 49 | ||
72 | |||
23 | /* ELF register definitions. This is needed for core dump support. */ | 73 | /* ELF register definitions. This is needed for core dump support. */ |
24 | 74 | ||
25 | /* | 75 | /* |
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 0faa614d9696..47c3616ea9ac 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -14,6 +14,7 @@ | |||
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
16 | #include <asm/page.h> | 16 | #include <asm/page.h> |
17 | #include <linux/kernel.h> | ||
17 | 18 | ||
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
19 | 20 | ||
diff --git a/include/asm-xtensa/ioctls.h b/include/asm-xtensa/ioctls.h
index 39e6f23921bb..0ffa942954b9 100644
--- a/include/asm-xtensa/ioctls.h
+++ b/include/asm-xtensa/ioctls.h
@@ -91,6 +91,10 @@ | |||
91 | #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ | 91 | #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ |
92 | #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ | 92 | #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ |
93 | #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ | 93 | #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ |
94 | #define TCGETS2 _IOR('T', 42, struct termios2) | ||
95 | #define TCSETS2 _IOW('T', 43, struct termios2) | ||
96 | #define TCSETSW2 _IOW('T', 44, struct termios2) | ||
97 | #define TCSETSF2 _IOW('T', 45, struct termios2) | ||
94 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | 98 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ |
95 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 99 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
96 | 100 | ||
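For context, TCGETS2/TCSETS2 operate on struct termios2 and are what user space uses to request an arbitrary (non-Bxxx) baud rate through BOTHER. A user-space sketch of the usual pattern; the definitions come from the kernel's termbits/ioctls headers rather than <termios.h>:

#include <asm/ioctls.h>         /* TCGETS2, TCSETS2        */
#include <asm/termbits.h>       /* struct termios2, BOTHER */
#include <sys/ioctl.h>

int set_custom_baud(int fd, int speed)
{
        struct termios2 tio;

        if (ioctl(fd, TCGETS2, &tio) < 0)
                return -1;
        tio.c_cflag &= ~CBAUD;
        tio.c_cflag |= BOTHER;
        tio.c_ispeed = speed;
        tio.c_ospeed = speed;
        return ioctl(fd, TCSETS2, &tio);
}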
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 1213cde75438..55ce2c9749a3 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -1,11 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * linux/include/asm-xtensa/page.h | 2 | * include/asm-xtensa/page.h |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version2 as | 5 | * it under the terms of the GNU General Public License version2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 8 | * Copyright (C) 2001 - 2007 Tensilica Inc. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _XTENSA_PAGE_H | 11 | #ifndef _XTENSA_PAGE_H |
@@ -14,6 +14,12 @@ | |||
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | 15 | ||
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
17 | #include <asm/types.h> | ||
18 | #include <asm/cache.h> | ||
19 | |||
20 | /* | ||
21 | * Fixed TLB translations in the processor. | ||
22 | */ | ||
17 | 23 | ||
18 | #define XCHAL_KSEG_CACHED_VADDR 0xd0000000 | 24 | #define XCHAL_KSEG_CACHED_VADDR 0xd0000000 |
19 | #define XCHAL_KSEG_BYPASS_VADDR 0xd8000000 | 25 | #define XCHAL_KSEG_BYPASS_VADDR 0xd8000000 |
@@ -26,13 +32,60 @@ | |||
26 | */ | 32 | */ |
27 | 33 | ||
28 | #define PAGE_SHIFT 12 | 34 | #define PAGE_SHIFT 12 |
29 | #define PAGE_SIZE (1 << PAGE_SHIFT) | 35 | #define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) |
30 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 36 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
31 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK) | 37 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK) |
32 | 38 | ||
33 | #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR | 39 | #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR |
34 | #define MAX_MEM_PFN XCHAL_KSEG_SIZE | 40 | #define MAX_MEM_PFN XCHAL_KSEG_SIZE |
35 | #define PGTABLE_START 0x80000000 | 41 | #define PGTABLE_START 0x80000000 |
42 | |||
43 | /* | ||
44 | * Cache aliasing: | ||
45 | * | ||
46 | * If the cache size for one way is greater than the page size, we have to | ||
47 | * deal with cache aliasing. The cache index is wider than the page size: | ||
48 | * | ||
49 | * | |cache| cache index | ||
50 | * | pfn |off| virtual address | ||
51 | * |xxxx:X|zzz| | ||
52 | * | : | | | ||
53 | * | \ / | | | ||
54 | * |trans.| | | ||
55 | * | / \ | | | ||
56 | * |yyyy:Y|zzz| physical address | ||
57 | * | ||
58 | * When the page number is translated to the physical page address, the lowest | ||
59 | * bit(s) (X) that are part of the cache index are also translated (Y). | ||
60 | * If this translation changes bit(s) (X), the cache index is also affected, | ||
61 | * thus resulting in a different cache line than before. | ||
62 | * The kernel does not provide a mechanism to ensure that the page color | ||
63 | * (represented by this bit) remains the same when allocated or when pages | ||
64 | * are remapped. When user pages are mapped into kernel space, the color of | ||
65 | * the page might also change. | ||
66 | * | ||
67 | * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2 | ||
68 | * to temporarily map a patch so we can match the color. | ||
69 | */ | ||
70 | |||
71 | #if DCACHE_WAY_SIZE > PAGE_SIZE | ||
72 | # define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT) | ||
73 | # define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1)) | ||
74 | # define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT) | ||
75 | # define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0) | ||
76 | #else | ||
77 | # define DCACHE_ALIAS_ORDER 0 | ||
78 | #endif | ||
79 | |||
80 | #if ICACHE_WAY_SIZE > PAGE_SIZE | ||
81 | # define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT) | ||
82 | # define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1)) | ||
83 | # define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT) | ||
84 | # define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0) | ||
85 | #else | ||
86 | # define ICACHE_ALIAS_ORDER 0 | ||
87 | #endif | ||
88 | |||
36 | 89 | ||
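A worked example of the new alias macros, assuming an 8 KB way and 4 KB pages (one colour bit):

/* Assuming DCACHE_WAY_SIZE = 8192 and PAGE_SIZE = 4096:
 *   DCACHE_ALIAS_ORDER = 13 - 12 = 1
 *   DCACHE_ALIAS_MASK  = PAGE_MASK & 0x1fff = 0x1000
 *   DCACHE_ALIAS(0xd0003000) = (0x1000 >> 12) = 1   -> odd colour
 *   DCACHE_ALIAS(0xd0004000) = (0x0000 >> 12) = 0   -> even colour
 * so 0xd0003000 and 0xd0004000 can never index the same cache line, while
 * 0xd0003000 and 0xd0005000 (both colour 1) can alias each other.
 */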
37 | #ifdef __ASSEMBLY__ | 90 | #ifdef __ASSEMBLY__ |
38 | 91 | ||
@@ -58,34 +111,23 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
58 | 111 | ||
59 | /* | 112 | /* |
60 | * Pure 2^n version of get_order | 113 | * Pure 2^n version of get_order |
114 | * Use 'nsau' instructions if supported by the processor or the generic version. | ||
61 | */ | 115 | */ |
62 | 116 | ||
63 | static inline int get_order(unsigned long size) | 117 | #if XCHAL_HAVE_NSA |
118 | |||
119 | static inline __attribute_const__ int get_order(unsigned long size) | ||
64 | { | 120 | { |
65 | int order; | 121 | int lz; |
66 | #ifndef XCHAL_HAVE_NSU | 122 | asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT)); |
67 | unsigned long x1, x2, x4, x8, x16; | 123 | return 32 - lz; |
68 | |||
69 | size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
70 | x1 = size & 0xAAAAAAAA; | ||
71 | x2 = size & 0xCCCCCCCC; | ||
72 | x4 = size & 0xF0F0F0F0; | ||
73 | x8 = size & 0xFF00FF00; | ||
74 | x16 = size & 0xFFFF0000; | ||
75 | order = x2 ? 2 : 0; | ||
76 | order += (x16 != 0) * 16; | ||
77 | order += (x8 != 0) * 8; | ||
78 | order += (x4 != 0) * 4; | ||
79 | order += (x1 != 0); | ||
80 | |||
81 | return order; | ||
82 | #else | ||
83 | size = (size - 1) >> PAGE_SHIFT; | ||
84 | asm ("nsau %0, %1" : "=r" (order) : "r" (size)); | ||
85 | return 32 - order; | ||
86 | #endif | ||
87 | } | 124 | } |
88 | 125 | ||
126 | #else | ||
127 | |||
128 | # include <asm-generic/page.h> | ||
129 | |||
130 | #endif | ||
89 | 131 | ||
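The nsau-based version is easier to follow with a few values plugged in (nsau yields the count of leading zero bits, 32 for a zero operand); assuming 4 KB pages:

/*   size = 4096  : (size - 1) >> 12 = 0 -> nsau = 32 -> order = 0 (1 page)
 *   size = 8192  : (size - 1) >> 12 = 1 -> nsau = 31 -> order = 1 (2 pages)
 *   size = 16384 : (size - 1) >> 12 = 3 -> nsau = 30 -> order = 2 (4 pages)
 */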
90 | struct page; | 132 | struct page; |
91 | extern void clear_page(void *page); | 133 | extern void clear_page(void *page); |
@@ -96,11 +138,11 @@ extern void copy_page(void *to, void *from); | |||
96 | * some extra work | 138 | * some extra work |
97 | */ | 139 | */ |
98 | 140 | ||
99 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | 141 | #if DCACHE_WAY_SIZE > PAGE_SIZE |
100 | void clear_user_page(void *addr, unsigned long vaddr, struct page* page); | 142 | extern void clear_user_page(void*, unsigned long, struct page*); |
101 | void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); | 143 | extern void copy_user_page(void*, void*, unsigned long, struct page*); |
102 | #else | 144 | #else |
103 | # define clear_user_page(page,vaddr,pg) clear_page(page) | 145 | # define clear_user_page(page, vaddr, pg) clear_page(page) |
104 | # define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | 146 | # define copy_user_page(to, from, vaddr, pg) copy_page(to, from) |
105 | #endif | 147 | #endif |
106 | 148 | ||
diff --git a/include/asm-xtensa/pgalloc.h b/include/asm-xtensa/pgalloc.h
index d56ddf2055e1..3e5b56525102 100644
--- a/include/asm-xtensa/pgalloc.h
+++ b/include/asm-xtensa/pgalloc.h
@@ -1,11 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * linux/include/asm-xtensa/pgalloc.h | 2 | * include/asm-xtensa/pgalloc.h |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * Copyright (C) 2001-2005 Tensilica Inc. | 8 | * Copyright (C) 2001-2007 Tensilica Inc. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _XTENSA_PGALLOC_H | 11 | #ifndef _XTENSA_PGALLOC_H |
@@ -13,103 +13,54 @@ | |||
13 | 13 | ||
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | 15 | ||
16 | #include <linux/threads.h> | ||
17 | #include <linux/highmem.h> | 16 | #include <linux/highmem.h> |
18 | #include <asm/processor.h> | ||
19 | #include <asm/cacheflush.h> | ||
20 | |||
21 | |||
22 | /* Cache aliasing: | ||
23 | * | ||
24 | * If the cache size for one way is greater than the page size, we have to | ||
25 | * deal with cache aliasing. The cache index is wider than the page size: | ||
26 | * | ||
27 | * |cache | | ||
28 | * |pgnum |page| virtual address | ||
29 | * |xxxxxX|zzzz| | ||
30 | * | | | | ||
31 | * \ / | | | ||
32 | * trans.| | | ||
33 | * / \ | | | ||
34 | * |yyyyyY|zzzz| physical address | ||
35 | * | ||
36 | * When the page number is translated to the physical page address, the lowest | ||
37 | * bit(s) (X) that are also part of the cache index are also translated (Y). | ||
38 | * If this translation changes this bit (X), the cache index is also afected, | ||
39 | * thus resulting in a different cache line than before. | ||
40 | * The kernel does not provide a mechanism to ensure that the page color | ||
41 | * (represented by this bit) remains the same when allocated or when pages | ||
42 | * are remapped. When user pages are mapped into kernel space, the color of | ||
43 | * the page might also change. | ||
44 | * | ||
45 | * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2 | ||
46 | * to temporarily map a patch so we can match the color. | ||
47 | */ | ||
48 | |||
49 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | ||
50 | # define PAGE_COLOR_MASK (PAGE_MASK & (DCACHE_WAY_SIZE-1)) | ||
51 | # define PAGE_COLOR(a) \ | ||
52 | (((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT) | ||
53 | # define PAGE_COLOR_EQ(a,b) \ | ||
54 | ((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0) | ||
55 | # define PAGE_COLOR_MAP0(v) \ | ||
56 | (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK)) | ||
57 | # define PAGE_COLOR_MAP1(v) \ | ||
58 | (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE) | ||
59 | #endif | ||
60 | 17 | ||
61 | /* | 18 | /* |
62 | * Allocating and freeing a pmd is trivial: the 1-entry pmd is | 19 | * Allocating and freeing a pmd is trivial: the 1-entry pmd is |
63 | * inside the pgd, so has no extra memory associated with it. | 20 | * inside the pgd, so has no extra memory associated with it. |
64 | */ | 21 | */ |
65 | 22 | ||
66 | #define pgd_free(pgd) free_page((unsigned long)(pgd)) | 23 | #define pmd_populate_kernel(mm, pmdp, ptep) \ |
67 | 24 | (pmd_val(*(pmdp)) = ((unsigned long)ptep)) | |
68 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | 25 | #define pmd_populate(mm, pmdp, page) \ |
26 | (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page))) | ||
69 | 27 | ||
70 | static inline void | 28 | static inline pgd_t* |
71 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte) | 29 | pgd_alloc(struct mm_struct *mm) |
72 | { | 30 | { |
73 | pmd_val(*(pmdp)) = (unsigned long)(pte); | 31 | return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER); |
74 | __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp)); | ||
75 | } | 32 | } |
76 | 33 | ||
77 | static inline void | 34 | static inline void pgd_free(pgd_t *pgd) |
78 | pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page) | ||
79 | { | 35 | { |
80 | pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page); | 36 | free_page((unsigned long)pgd); |
81 | __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp)); | ||
82 | } | 37 | } |
83 | 38 | ||
39 | /* Use a slab cache for the pte pages (see also sparc64 implementation) */ | ||
84 | 40 | ||
41 | extern struct kmem_cache *pgtable_cache; | ||
85 | 42 | ||
86 | #else | 43 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
87 | 44 | unsigned long address) | |
88 | # define pmd_populate_kernel(mm, pmdp, pte) \ | ||
89 | (pmd_val(*(pmdp)) = (unsigned long)(pte)) | ||
90 | # define pmd_populate(mm, pmdp, page) \ | ||
91 | (pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page)) | ||
92 | |||
93 | #endif | ||
94 | |||
95 | static inline pgd_t* | ||
96 | pgd_alloc(struct mm_struct *mm) | ||
97 | { | 45 | { |
98 | pgd_t *pgd; | 46 | return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT); |
99 | 47 | } | |
100 | pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER); | ||
101 | |||
102 | if (likely(pgd != NULL)) | ||
103 | __flush_dcache_page((unsigned long)pgd); | ||
104 | 48 | ||
105 | return pgd; | 49 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
50 | unsigned long addr) | ||
51 | { | ||
52 | return virt_to_page(pte_alloc_one_kernel(mm, addr)); | ||
106 | } | 53 | } |
107 | 54 | ||
108 | extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr); | 55 | static inline void pte_free_kernel(pte_t *pte) |
109 | extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr); | 56 | { |
57 | kmem_cache_free(pgtable_cache, pte); | ||
58 | } | ||
110 | 59 | ||
111 | #define pte_free_kernel(pte) free_page((unsigned long)pte) | 60 | static inline void pte_free(struct page *page) |
112 | #define pte_free(pte) __free_page(pte) | 61 | { |
62 | kmem_cache_free(pgtable_cache, page_address(page)); | ||
63 | } | ||
113 | 64 | ||
114 | #endif /* __KERNEL__ */ | 65 | #endif /* __KERNEL__ */ |
115 | #endif /* _XTENSA_PGALLOC_H */ | 66 | #endif /* _XTENSA_PGALLOC_H */ |
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 06850f3b26a7..c0fcc1c9660c 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -1,11 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * linux/include/asm-xtensa/pgtable.h | 2 | * include/asm-xtensa/pgtable.h |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 8 | * Copyright (C) 2001 - 2007 Tensilica Inc. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _XTENSA_PGTABLE_H | 11 | #ifndef _XTENSA_PGTABLE_H |
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | /* | 24 | /* |
25 | * The Xtensa architecture port of Linux has a two-level page table system, | 25 | * The Xtensa architecture port of Linux has a two-level page table system, |
26 | * i.e. the logical three-level Linux page table layout are folded. | 26 | * i.e. the logical three-level Linux page table layout is folded. |
27 | * Each task has the following memory page tables: | 27 | * Each task has the following memory page tables: |
28 | * | 28 | * |
29 | * PGD table (page directory), ie. 3rd-level page table: | 29 | * PGD table (page directory), ie. 3rd-level page table: |
@@ -43,6 +43,7 @@ | |||
43 | * | 43 | * |
44 | * The individual pages are 4 kB big with special pages for the empty_zero_page. | 44 | * The individual pages are 4 kB big with special pages for the empty_zero_page. |
45 | */ | 45 | */ |
46 | |||
46 | #define PGDIR_SHIFT 22 | 47 | #define PGDIR_SHIFT 22 |
47 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 48 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
48 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 49 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
@@ -53,24 +54,26 @@ | |||
53 | */ | 54 | */ |
54 | #define PTRS_PER_PTE 1024 | 55 | #define PTRS_PER_PTE 1024 |
55 | #define PTRS_PER_PTE_SHIFT 10 | 56 | #define PTRS_PER_PTE_SHIFT 10 |
56 | #define PTRS_PER_PMD 1 | ||
57 | #define PTRS_PER_PGD 1024 | 57 | #define PTRS_PER_PGD 1024 |
58 | #define PGD_ORDER 0 | 58 | #define PGD_ORDER 0 |
59 | #define PMD_ORDER 0 | ||
60 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) | 59 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) |
61 | #define FIRST_USER_ADDRESS 0 | 60 | #define FIRST_USER_ADDRESS 0 |
62 | #define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) | 61 | #define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) |
63 | 62 | ||
64 | /* virtual memory area. We keep a distance to other memory regions to be | 63 | /* |
64 | * Virtual memory area. We keep a distance to other memory regions to be | ||
65 | * on the safe side. We also use this area for cache aliasing. | 65 | * on the safe side. We also use this area for cache aliasing. |
66 | */ | 66 | */ |
67 | 67 | ||
68 | // FIXME: virtual memory area must be configuration-dependent | ||
69 | |||
70 | #define VMALLOC_START 0xC0000000 | 68 | #define VMALLOC_START 0xC0000000 |
71 | #define VMALLOC_END 0xC7FF0000 | 69 | #define VMALLOC_END 0xC6FEFFFF |
70 | #define TLBTEMP_BASE_1 0xC6FF0000 | ||
71 | #define TLBTEMP_BASE_2 0xC6FF8000 | ||
72 | #define MODULE_START 0xC7000000 | ||
73 | #define MODULE_END 0xC7FFFFFF | ||
72 | 74 | ||
73 | /* Xtensa Linux config PTE layout (when present): | 75 | /* |
76 | * Xtensa Linux config PTE layout (when present): | ||
74 | * 31-12: PPN | 77 | * 31-12: PPN |
75 | * 11-6: Software | 78 | * 11-6: Software |
76 | * 5-4: RING | 79 | * 5-4: RING |
@@ -86,47 +89,55 @@ | |||
86 | * See further below for PTE layout for swapped-out pages. | 89 | * See further below for PTE layout for swapped-out pages. |
87 | */ | 90 | */ |
88 | 91 | ||
89 | #define _PAGE_VALID (1<<0) /* hardware: page is accessible */ | 92 | #define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */ |
90 | #define _PAGE_WRENABLE (1<<1) /* hardware: page is writable */ | 93 | #define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */ |
94 | |||
95 | #define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */ | ||
96 | #define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */ | ||
91 | 97 | ||
92 | /* None of these cache modes include MP coherency: */ | 98 | /* None of these cache modes include MP coherency: */ |
93 | #define _PAGE_NO_CACHE (0<<2) /* bypass, non-speculative */ | 99 | #define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */ |
94 | #if XCHAL_DCACHE_IS_WRITEBACK | 100 | #define _PAGE_CA_WB (1<<2) /* write-back */ |
95 | # define _PAGE_WRITEBACK (1<<2) /* write back */ | 101 | #define _PAGE_CA_WT (2<<2) /* write-through */ |
96 | # define _PAGE_WRITETHRU (2<<2) /* write through */ | 102 | #define _PAGE_CA_MASK (3<<2) |
97 | #else | 103 | #define _PAGE_INVALID (3<<2) |
98 | # define _PAGE_WRITEBACK (1<<2) /* assume write through */ | ||
99 | # define _PAGE_WRITETHRU (1<<2) | ||
100 | #endif | ||
101 | #define _PAGE_NOALLOC (3<<2) /* don't allocate cache,if not cached */ | ||
102 | #define _CACHE_MASK (3<<2) | ||
103 | 104 | ||
104 | #define _PAGE_USER (1<<4) /* user access (ring=1) */ | 105 | #define _PAGE_USER (1<<4) /* user access (ring=1) */ |
105 | #define _PAGE_KERNEL (0<<4) /* kernel access (ring=0) */ | ||
106 | 106 | ||
107 | /* Software */ | 107 | /* Software */ |
108 | #define _PAGE_RW (1<<6) /* software: page writable */ | 108 | #define _PAGE_WRITABLE_BIT 6 |
109 | #define _PAGE_WRITABLE (1<<6) /* software: page writable */ | ||
109 | #define _PAGE_DIRTY (1<<7) /* software: page dirty */ | 110 | #define _PAGE_DIRTY (1<<7) /* software: page dirty */ |
110 | #define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */ | 111 | #define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */ |
111 | #define _PAGE_FILE (1<<9) /* nonlinear file mapping*/ | ||
112 | 112 | ||
113 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY) | 113 | /* On older HW revisions, we always have to set bit 0 */ |
114 | #define _PAGE_PRESENT ( _PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED) | 114 | #if XCHAL_HW_VERSION_MAJOR < 2000 |
115 | # define _PAGE_VALID (1<<0) | ||
116 | #else | ||
117 | # define _PAGE_VALID 0 | ||
118 | #endif | ||
115 | 119 | ||
116 | #ifdef CONFIG_MMU | 120 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) |
121 | #define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED) | ||
117 | 122 | ||
118 | # define PAGE_NONE __pgprot(_PAGE_PRESENT) | 123 | #ifdef CONFIG_MMU |
119 | # define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW) | ||
120 | # define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER) | ||
121 | # define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER) | ||
122 | # define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE) | ||
123 | # define PAGE_INVALID __pgprot(_PAGE_USER) | ||
124 | 124 | ||
125 | # if (DCACHE_WAY_SIZE > PAGE_SIZE) | 125 | #define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE) |
126 | # define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL) | 126 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER) |
127 | # else | 127 | #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC) |
128 | # define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL) | 128 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER) |
129 | # endif | 129 | #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC) |
130 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE) | ||
131 | #define PAGE_SHARED_EXEC \ | ||
132 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC) | ||
133 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE) | ||
134 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) | ||
135 | |||
136 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | ||
137 | # define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED) | ||
138 | #else | ||
139 | # define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB) | ||
140 | #endif | ||
130 | 141 | ||
131 | #else /* no mmu */ | 142 | #else /* no mmu */ |
132 | 143 | ||
@@ -145,23 +156,23 @@ | |||
145 | * What follows is the closest we can get by reasonable means.. | 156 | * What follows is the closest we can get by reasonable means.. |
146 | * See linux/mm/mmap.c for protection_map[] array that uses these definitions. | 157 | * See linux/mm/mmap.c for protection_map[] array that uses these definitions. |
147 | */ | 158 | */ |
148 | #define __P000 PAGE_NONE /* private --- */ | 159 | #define __P000 PAGE_NONE /* private --- */ |
149 | #define __P001 PAGE_READONLY /* private --r */ | 160 | #define __P001 PAGE_READONLY /* private --r */ |
150 | #define __P010 PAGE_COPY /* private -w- */ | 161 | #define __P010 PAGE_COPY /* private -w- */ |
151 | #define __P011 PAGE_COPY /* private -wr */ | 162 | #define __P011 PAGE_COPY /* private -wr */ |
152 | #define __P100 PAGE_READONLY /* private x-- */ | 163 | #define __P100 PAGE_READONLY_EXEC /* private x-- */ |
153 | #define __P101 PAGE_READONLY /* private x-r */ | 164 | #define __P101 PAGE_READONLY_EXEC /* private x-r */ |
154 | #define __P110 PAGE_COPY /* private xw- */ | 165 | #define __P110 PAGE_COPY_EXEC /* private xw- */ |
155 | #define __P111 PAGE_COPY /* private xwr */ | 166 | #define __P111 PAGE_COPY_EXEC /* private xwr */ |
156 | 167 | ||
157 | #define __S000 PAGE_NONE /* shared --- */ | 168 | #define __S000 PAGE_NONE /* shared --- */ |
158 | #define __S001 PAGE_READONLY /* shared --r */ | 169 | #define __S001 PAGE_READONLY /* shared --r */ |
159 | #define __S010 PAGE_SHARED /* shared -w- */ | 170 | #define __S010 PAGE_SHARED /* shared -w- */ |
160 | #define __S011 PAGE_SHARED /* shared -wr */ | 171 | #define __S011 PAGE_SHARED /* shared -wr */ |
161 | #define __S100 PAGE_READONLY /* shared x-- */ | 172 | #define __S100 PAGE_READONLY_EXEC /* shared x-- */ |
162 | #define __S101 PAGE_READONLY /* shared x-r */ | 173 | #define __S101 PAGE_READONLY_EXEC /* shared x-r */ |
163 | #define __S110 PAGE_SHARED /* shared xw- */ | 174 | #define __S110 PAGE_SHARED_EXEC /* shared xw- */ |
164 | #define __S111 PAGE_SHARED /* shared xwr */ | 175 | #define __S111 PAGE_SHARED_EXEC /* shared xwr */ |
165 | 176 | ||
166 | #ifndef __ASSEMBLY__ | 177 | #ifndef __ASSEMBLY__ |
167 | 178 | ||
@@ -183,35 +194,42 @@ extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)]; | |||
183 | #define pmd_page(pmd) virt_to_page(pmd_val(pmd)) | 194 | #define pmd_page(pmd) virt_to_page(pmd_val(pmd)) |
184 | 195 | ||
185 | /* | 196 | /* |
186 | * The following only work if pte_present() is true. | 197 | * pte status. |
187 | */ | 198 | */ |
188 | #define pte_none(pte) (!(pte_val(pte) ^ _PAGE_USER)) | 199 | #define pte_none(pte) (pte_val(pte) == _PAGE_INVALID) |
189 | #define pte_present(pte) (pte_val(pte) & _PAGE_VALID) | 200 | #define pte_present(pte) \ |
201 | (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \ | ||
202 | || ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE)) | ||
190 | #define pte_clear(mm,addr,ptep) \ | 203 | #define pte_clear(mm,addr,ptep) \ |
191 | do { update_pte(ptep, __pte(_PAGE_USER)); } while(0) | 204 | do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0) |
192 | 205 | ||
193 | #define pmd_none(pmd) (!pmd_val(pmd)) | 206 | #define pmd_none(pmd) (!pmd_val(pmd)) |
194 | #define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) | 207 | #define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) |
195 | #define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0) | ||
196 | #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) | 208 | #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) |
209 | #define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0) | ||
197 | 210 | ||
198 | /* Note: We use the _PAGE_USER bit to indicate write-protect kernel memory */ | 211 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; } |
199 | |||
200 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | ||
201 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | 212 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } |
202 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | 213 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } |
203 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | 214 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } |
204 | static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; } | 215 | static inline pte_t pte_wrprotect(pte_t pte) |
205 | static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } | 216 | { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; } |
206 | static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | 217 | static inline pte_t pte_mkclean(pte_t pte) |
207 | static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } | 218 | { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; } |
208 | static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } | 219 | static inline pte_t pte_mkold(pte_t pte) |
209 | static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; } | 220 | { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } |
221 | static inline pte_t pte_mkdirty(pte_t pte) | ||
222 | { pte_val(pte) |= _PAGE_DIRTY; return pte; } | ||
223 | static inline pte_t pte_mkyoung(pte_t pte) | ||
224 | { pte_val(pte) |= _PAGE_ACCESSED; return pte; } | ||
225 | static inline pte_t pte_mkwrite(pte_t pte) | ||
226 | { pte_val(pte) |= _PAGE_WRITABLE; return pte; } | ||
210 | 227 | ||
211 | /* | 228 | /* |
212 | * Conversion functions: convert a page and protection to a page entry, | 229 | * Conversion functions: convert a page and protection to a page entry, |
213 | * and a page entry and page directory to the page they refer to. | 230 | * and a page entry and page directory to the page they refer to. |
214 | */ | 231 | */ |
232 | |||
215 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | 233 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) |
216 | #define pte_same(a,b) (pte_val(a) == pte_val(b)) | 234 | #define pte_same(a,b) (pte_val(a) == pte_val(b)) |
217 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 235 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
@@ -232,8 +250,9 @@ static inline void update_pte(pte_t *ptep, pte_t pteval) | |||
232 | { | 250 | { |
233 | *ptep = pteval; | 251 | *ptep = pteval; |
234 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | 252 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK |
235 | __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep)); | 253 | __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep)); |
236 | #endif | 254 | #endif |
255 | |||
237 | } | 256 | } |
238 | 257 | ||
239 | struct mm_struct; | 258 | struct mm_struct; |
@@ -249,9 +268,6 @@ static inline void | |||
249 | set_pmd(pmd_t *pmdp, pmd_t pmdval) | 268 | set_pmd(pmd_t *pmdp, pmd_t pmdval) |
250 | { | 269 | { |
251 | *pmdp = pmdval; | 270 | *pmdp = pmdval; |
252 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | ||
253 | __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp)); | ||
254 | #endif | ||
255 | } | 271 | } |
256 | 272 | ||
257 | struct vm_area_struct; | 273 | struct vm_area_struct; |
@@ -306,52 +322,34 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | |||
306 | 322 | ||
307 | /* | 323 | /* |
308 | * Encode and decode a swap entry. | 324 | * Encode and decode a swap entry. |
309 | * Each PTE in a process VM's page table is either: | ||
310 | * "present" -- valid and not swapped out, protection bits are meaningful; | ||
311 | * "not present" -- which further subdivides in these two cases: | ||
312 | * "none" -- no mapping at all; identified by pte_none(), set by pte_clear( | ||
313 | * "swapped out" -- the page is swapped out, and the SWP macros below | ||
314 | * are used to store swap file info in the PTE itself. | ||
315 | * | 325 | * |
316 | * In the Xtensa processor MMU, any PTE entries in user space (or anywhere | 326 | * Format of swap pte: |
317 | * in virtual memory that can map differently across address spaces) | 327 | * bit 0 MBZ |
318 | * must have a correct ring value that represents the RASID field that | 328 | * bit 1 page-file (must be zero) |
319 | * is changed when switching address spaces. Eg. such PTE entries cannot | 329 | * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID) |
320 | * be set to ring zero, because that can cause a (global) kernel ASID | 330 | * bits 4 - 5 ring protection (must be 01: _PAGE_USER) |
321 | * entry to be created in the TLBs (even with invalid cache attribute), | 331 | * bits 6 - 10 swap type (5 bits -> 32 types) |
322 | * potentially causing a multihit exception when going back to another | 332 | * bits 11 - 31 swap offset / PAGE_SIZE (21 bits -> 8GB) |
323 | * address space that mapped the same virtual address at another ring. | 333 | |
324 | * | 334 | * Format of file pte: |
325 | * SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs. | 335 | * bit 0 MBZ |
326 | * We also avoid using the _PAGE_VALID bit which must be zero for non-present | 336 | * bit 1 page-file (must be one: _PAGE_FILE) |
327 | * pages. | 337 | * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID) |
328 | * | 338 | * bits 4 - 5 ring protection (must be 01: _PAGE_USER) |
329 | * We end up with the following available bits: 1..3 and 7..31. | 339 | * bits 6 - 31 file offset / PAGE_SIZE |
330 | * We don't bother with 1..3 for now (we can use them later if needed), | ||
331 | * and chose to allocate 6 bits for SWP_TYPE and the remaining 19 bits | ||
332 | * for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it | ||
333 | * is currently implemented as an index into swap_info[MAX_SWAPFILES] | ||
334 | * and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>. | ||
335 | * However, for some reason all other architectures in the 2.4 kernel | ||
336 | * reserve either 6, 7, or 8 bits so I'll not detract from that for now. :) | ||
337 | * SWP_OFFSET is an offset into the swap file in page-size units, so | ||
338 | * with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB. | ||
339 | * | ||
340 | * FIXME: 2 GB isn't very big. Other bits can be used to allow | ||
341 | * larger swap sizes. In the meantime, it appears relatively easy to get | ||
342 | * around the 2 GB limitation by simply using multiple swap files. | ||
343 | */ | 340 | */ |
344 | 341 | ||
345 | #define __swp_type(entry) (((entry).val >> 7) & 0x3f) | 342 | #define __swp_type(entry) (((entry).val >> 6) & 0x1f) |
346 | #define __swp_offset(entry) ((entry).val >> 13) | 343 | #define __swp_offset(entry) ((entry).val >> 11) |
347 | #define __swp_entry(type,offs) ((swp_entry_t) {((type) << 7) | ((offs) << 13)}) | 344 | #define __swp_entry(type,offs) \ |
345 | ((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID}) | ||
348 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | 346 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
349 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 347 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
350 | 348 | ||
351 | #define PTE_FILE_MAX_BITS 29 | 349 | #define PTE_FILE_MAX_BITS 28 |
352 | #define pte_to_pgoff(pte) (pte_val(pte) >> 3) | 350 | #define pte_to_pgoff(pte) (pte_val(pte) >> 4) |
353 | #define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE }) | 351 | #define pgoff_to_pte(off) \ |
354 | 352 | ((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE }) | |
355 | 353 | ||
356 | #endif /* !defined (__ASSEMBLY__) */ | 354 | #endif /* !defined (__ASSEMBLY__) */ |
357 | 355 | ||
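
For reference, a minimal userspace-style sketch of the packing that the new __swp_type/__swp_offset/__swp_entry macros above perform. The PAGE_INVALID value below is an assumption standing in for the real _PAGE_INVALID from asm-xtensa/pgtable.h; the shifts and masks are the ones shown in the hunk.

    /* Illustrative only: pack and unpack a swap pte as the macros above do. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_INVALID 0x0cUL  /* assumed stand-in for _PAGE_INVALID (bits 2-3 = 11) */

    static unsigned long swp_entry(unsigned long type, unsigned long offset)
    {
            return (type << 6) | (offset << 11) | PAGE_INVALID;
    }

    static unsigned long swp_type(unsigned long val)   { return (val >> 6) & 0x1f; }
    static unsigned long swp_offset(unsigned long val) { return val >> 11; }

    int main(void)
    {
            unsigned long e = swp_entry(3, 0x12345);

            assert(swp_type(e) == 3);          /* 5 type bits -> 32 swap areas */
            assert(swp_offset(e) == 0x12345);  /* 21 offset bits -> 8GB per area */
            printf("swap pte = %#lx\n", e);
            return 0;
    }
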
@@ -394,13 +392,12 @@ extern void update_mmu_cache(struct vm_area_struct * vma, | |||
394 | * remap a physical page `pfn' of size `size' with page protection `prot' | 392 | * remap a physical page `pfn' of size `size' with page protection `prot' |
395 | * into virtual address `from' | 393 | * into virtual address `from' |
396 | */ | 394 | */ |
395 | |||
397 | #define io_remap_pfn_range(vma,from,pfn,size,prot) \ | 396 | #define io_remap_pfn_range(vma,from,pfn,size,prot) \ |
398 | remap_pfn_range(vma, from, pfn, size, prot) | 397 | remap_pfn_range(vma, from, pfn, size, prot) |
399 | 398 | ||
400 | 399 | ||
401 | /* No page table caches to init */ | 400 | extern void pgtable_cache_init(void); |
402 | |||
403 | #define pgtable_cache_init() do { } while (0) | ||
404 | 401 | ||
405 | typedef pte_t *pte_addr_t; | 402 | typedef pte_t *pte_addr_t; |
406 | 403 | ||
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h index 4feb9f7f35a6..35145bcd96eb 100644 --- a/include/asm-xtensa/processor.h +++ b/include/asm-xtensa/processor.h | |||
@@ -33,7 +33,7 @@ | |||
33 | * the 1 GB requirement applies to the stack as well. | 33 | * the 1 GB requirement applies to the stack as well. |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #define TASK_SIZE 0x40000000 | 36 | #define TASK_SIZE __XTENSA_UL_CONST(0x40000000) |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * General exception cause assigned to debug exceptions. Debug exceptions go | 39 | * General exception cause assigned to debug exceptions. Debug exceptions go |
diff --git a/include/asm-xtensa/syscall.h b/include/asm-xtensa/syscall.h index 6cb0d42f11c8..05cebf8f62b1 100644 --- a/include/asm-xtensa/syscall.h +++ b/include/asm-xtensa/syscall.h | |||
@@ -1,3 +1,13 @@ | |||
1 | /* | ||
2 | * include/asm-xtensa/syscall.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2001 - 2007 Tensilica Inc. | ||
9 | */ | ||
10 | |||
1 | struct pt_regs; | 11 | struct pt_regs; |
2 | struct sigaction; | 12 | struct sigaction; |
3 | asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); | 13 | asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); |
@@ -17,4 +27,16 @@ asmlinkage long sys_rt_sigaction(int, | |||
17 | const struct sigaction __user *, | 27 | const struct sigaction __user *, |
18 | struct sigaction __user *, | 28 | struct sigaction __user *, |
19 | size_t); | 29 | size_t); |
20 | asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg); | 30 | asmlinkage long xtensa_shmat(int, char __user *, int); |
31 | asmlinkage long xtensa_fadvise64_64(int, int, | ||
32 | unsigned long long, unsigned long long); | ||
33 | |||
34 | /* Should probably move to linux/syscalls.h */ | ||
35 | struct pollfd; | ||
36 | asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, | ||
37 | fd_set __user *exp, struct timespec __user *tsp, void __user *sig); | ||
38 | asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, | ||
39 | struct timespec __user *tsp, const sigset_t __user *sigmask, | ||
40 | size_t sigsetsize); | ||
41 | |||
42 | |||
diff --git a/include/asm-xtensa/termbits.h b/include/asm-xtensa/termbits.h index 9972c25ec86f..85aa6a3c0b6e 100644 --- a/include/asm-xtensa/termbits.h +++ b/include/asm-xtensa/termbits.h | |||
@@ -157,6 +157,7 @@ struct ktermios { | |||
157 | #define HUPCL 0002000 | 157 | #define HUPCL 0002000 |
158 | #define CLOCAL 0004000 | 158 | #define CLOCAL 0004000 |
159 | #define CBAUDEX 0010000 | 159 | #define CBAUDEX 0010000 |
160 | #define BOTHER 0010000 | ||
160 | #define B57600 0010001 | 161 | #define B57600 0010001 |
161 | #define B115200 0010002 | 162 | #define B115200 0010002 |
162 | #define B230400 0010003 | 163 | #define B230400 0010003 |
@@ -172,10 +173,12 @@ struct ktermios { | |||
172 | #define B3000000 0010015 | 173 | #define B3000000 0010015 |
173 | #define B3500000 0010016 | 174 | #define B3500000 0010016 |
174 | #define B4000000 0010017 | 175 | #define B4000000 0010017 |
175 | #define CIBAUD 002003600000 /* input baud rate (not used) */ | 176 | #define CIBAUD 002003600000 /* input baud rate */ |
176 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | 177 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ |
177 | #define CRTSCTS 020000000000 /* flow control */ | 178 | #define CRTSCTS 020000000000 /* flow control */ |
178 | 179 | ||
180 | #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ | ||
181 | |||
179 | /* c_lflag bits */ | 182 | /* c_lflag bits */ |
180 | 183 | ||
181 | #define ISIG 0000001 | 184 | #define ISIG 0000001 |
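
The new BOTHER and IBSHIFT definitions are the hooks the termios2 interface uses for arbitrary baud rates. A hedged userspace sketch of that usage follows; it assumes struct termios2 and the TCGETS2/TCSETS2 ioctls are available on this port, and the device path and rate are placeholders.

    /* Hedged sketch: request an arbitrary output/input rate via BOTHER. */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <asm/ioctls.h>     /* TCGETS2, TCSETS2 (assumed available) */
    #include <asm/termbits.h>   /* struct termios2, BOTHER, CBAUD */

    int main(void)
    {
            struct termios2 tio;
            int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);  /* placeholder path */

            if (fd < 0 || ioctl(fd, TCGETS2, &tio) < 0)
                    return 1;

            tio.c_cflag &= ~CBAUD;
            tio.c_cflag |= BOTHER;      /* rate comes from c_ospeed/c_ispeed */
            tio.c_ospeed = 250000;      /* placeholder rate */
            tio.c_ispeed = 250000;

            return ioctl(fd, TCSETS2, &tio) < 0;
    }
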
diff --git a/include/asm-xtensa/termios.h b/include/asm-xtensa/termios.h index f14b42c8dac0..4673f42f88a7 100644 --- a/include/asm-xtensa/termios.h +++ b/include/asm-xtensa/termios.h | |||
@@ -95,8 +95,10 @@ struct termio { | |||
95 | copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ | 95 | copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ |
96 | }) | 96 | }) |
97 | 97 | ||
98 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) | 98 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) |
99 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) | 99 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) |
100 | #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) | ||
101 | #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) | ||
100 | 102 | ||
101 | #endif /* __KERNEL__ */ | 103 | #endif /* __KERNEL__ */ |
102 | 104 | ||
diff --git a/include/asm-xtensa/timex.h b/include/asm-xtensa/timex.h index 28c7985a4000..a5fca59fba9e 100644 --- a/include/asm-xtensa/timex.h +++ b/include/asm-xtensa/timex.h | |||
@@ -41,10 +41,10 @@ | |||
41 | extern unsigned long ccount_per_jiffy; | 41 | extern unsigned long ccount_per_jiffy; |
42 | extern unsigned long ccount_nsec; | 42 | extern unsigned long ccount_nsec; |
43 | #define CCOUNT_PER_JIFFY ccount_per_jiffy | 43 | #define CCOUNT_PER_JIFFY ccount_per_jiffy |
44 | #define CCOUNT_NSEC ccount_nsec | 44 | #define NSEC_PER_CCOUNT ccount_nsec |
45 | #else | 45 | #else |
46 | #define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) | 46 | #define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) |
47 | #define CCOUNT_NSEC (1000000000UL / CONFIG_XTENSA_CPU_CLOCK) | 47 | #define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK) |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | 50 | ||
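
Renaming CCOUNT_NSEC to NSEC_PER_CCOUNT makes the unit explicit: nanoseconds per CCOUNT cycle, with the clock configured in MHz. A standalone sketch of the conversion it supports, using an assumed 50 MHz value in place of CONFIG_XTENSA_CPU_CLOCK:

    /* Illustrative only: turn a CCOUNT delta into nanoseconds. */
    #include <stdio.h>

    #define CPU_CLOCK_MHZ    50UL                                /* assumed clock */
    #define NSEC_PER_CCOUNT  (1000UL / CPU_CLOCK_MHZ)            /* 20 ns per cycle */
    #define CCOUNT_PER_JIFFY (CPU_CLOCK_MHZ * (1000000UL / 100)) /* HZ = 100 */

    int main(void)
    {
            unsigned long cycles = CCOUNT_PER_JIFFY;   /* one timer tick's worth */

            printf("%lu cycles = %lu ns\n", cycles, cycles * NSEC_PER_CCOUNT);
            return 0;
    }
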
diff --git a/include/asm-xtensa/tlb.h b/include/asm-xtensa/tlb.h index 4562b2dcfbc0..4830232017af 100644 --- a/include/asm-xtensa/tlb.h +++ b/include/asm-xtensa/tlb.h | |||
@@ -11,14 +11,36 @@ | |||
11 | #ifndef _XTENSA_TLB_H | 11 | #ifndef _XTENSA_TLB_H |
12 | #define _XTENSA_TLB_H | 12 | #define _XTENSA_TLB_H |
13 | 13 | ||
14 | #define tlb_start_vma(tlb,vma) do { } while (0) | 14 | #include <asm/cache.h> |
15 | #define tlb_end_vma(tlb,vma) do { } while (0) | 15 | #include <asm/page.h> |
16 | #define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0) | 16 | |
17 | #if (DCACHE_WAY_SIZE <= PAGE_SIZE) | ||
18 | |||
19 | /* Note, read http://lkml.org/lkml/2004/1/15/6 */ | ||
20 | |||
21 | # define tlb_start_vma(tlb,vma) do { } while (0) | ||
22 | # define tlb_end_vma(tlb,vma) do { } while (0) | ||
23 | |||
24 | #else | ||
17 | 25 | ||
26 | # define tlb_start_vma(tlb, vma) \ | ||
27 | do { \ | ||
28 | if (!tlb->fullmm) \ | ||
29 | flush_cache_range(vma, vma->vm_start, vma->vm_end); \ | ||
30 | } while(0) | ||
31 | |||
32 | # define tlb_end_vma(tlb, vma) \ | ||
33 | do { \ | ||
34 | if (!tlb->fullmm) \ | ||
35 | flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ | ||
36 | } while(0) | ||
37 | |||
38 | #endif | ||
39 | |||
40 | #define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0) | ||
18 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | 41 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) |
19 | 42 | ||
20 | #include <asm-generic/tlb.h> | 43 | #include <asm-generic/tlb.h> |
21 | #include <asm/page.h> | ||
22 | 44 | ||
23 | #define __pte_free_tlb(tlb,pte) pte_free(pte) | 45 | #define __pte_free_tlb(tlb,pte) pte_free(pte) |
24 | 46 | ||
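
The DCACHE_WAY_SIZE > PAGE_SIZE test above is about virtual aliasing: when one cache way spans more than a page, a physical page can be cached at DCACHE_WAY_SIZE / PAGE_SIZE different indices, so the VMA has to be flushed before its translations disappear (see the lkml link in the hunk). A toy calculation with assumed sizes:

    /* Illustrative only; the sizes are assumptions, not a real configuration. */
    #include <stdio.h>

    #define DCACHE_WAY_SIZE (16 * 1024)   /* assumed 16 KiB per way */
    #define PAGE_SIZE       (4 * 1024)    /* assumed 4 KiB pages */

    int main(void)
    {
            unsigned colors = DCACHE_WAY_SIZE / PAGE_SIZE;

            if (colors > 1)
                    printf("%u cache colors: tlb_start_vma() must flush the range\n",
                           colors);
            else
                    printf("no aliasing: tlb_start_vma() can stay a no-op\n");
            return 0;
    }
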
diff --git a/include/asm-xtensa/types.h b/include/asm-xtensa/types.h index 9d99a8e9e337..f1e84526f999 100644 --- a/include/asm-xtensa/types.h +++ b/include/asm-xtensa/types.h | |||
@@ -11,6 +11,15 @@ | |||
11 | #ifndef _XTENSA_TYPES_H | 11 | #ifndef _XTENSA_TYPES_H |
12 | #define _XTENSA_TYPES_H | 12 | #define _XTENSA_TYPES_H |
13 | 13 | ||
14 | |||
15 | #ifdef __ASSEMBLY__ | ||
16 | # define __XTENSA_UL(x) (x) | ||
17 | # define __XTENSA_UL_CONST(x) x | ||
18 | #else | ||
19 | # define __XTENSA_UL(x) ((unsigned long)(x)) | ||
20 | # define __XTENSA_UL_CONST(x) x##UL | ||
21 | #endif | ||
22 | |||
14 | #ifndef __ASSEMBLY__ | 23 | #ifndef __ASSEMBLY__ |
15 | 24 | ||
16 | typedef unsigned short umode_t; | 25 | typedef unsigned short umode_t; |
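
__XTENSA_UL_CONST() lets one header serve both the assembler, which rejects a UL suffix, and C, where a bare 0x40000000 is a plain int on 32-bit targets. A standalone sketch of the C side, reusing the macro shape from the hunk above:

    /* Sketch of why TASK_SIZE wants the UL-suffixed form when compiled as C. */
    #include <stdio.h>

    #define __XTENSA_UL_CONST(x) x##UL
    #define TASK_SIZE            __XTENSA_UL_CONST(0x40000000)

    int main(void)
    {
            unsigned long addr = 0xc0000000UL;   /* some kernel-space address */

            /* TASK_SIZE is unsigned long, so the comparison is done in
             * unsigned arithmetic on both 32- and 64-bit builds. */
            printf("user address? %s\n", addr < TASK_SIZE ? "yes" : "no");
            printf("TASK_SIZE = %#lx\n", TASK_SIZE);
            return 0;
    }
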
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h index 9bd34024431c..92968aabe34e 100644 --- a/include/asm-xtensa/unistd.h +++ b/include/asm-xtensa/unistd.h | |||
@@ -151,7 +151,7 @@ __SYSCALL( 61, sys_fcntl64, 3) | |||
151 | #define __NR_available62 62 | 151 | #define __NR_available62 62 |
152 | __SYSCALL( 62, sys_ni_syscall, 0) | 152 | __SYSCALL( 62, sys_ni_syscall, 0) |
153 | #define __NR_fadvise64_64 63 | 153 | #define __NR_fadvise64_64 63 |
154 | __SYSCALL( 63, sys_fadvise64_64, 6) | 154 | __SYSCALL( 63, xtensa_fadvise64_64, 6) |
155 | #define __NR_utime 64 /* glibc 2.3.3 ?? */ | 155 | #define __NR_utime 64 /* glibc 2.3.3 ?? */ |
156 | __SYSCALL( 64, sys_utime, 2) | 156 | __SYSCALL( 64, sys_utime, 2) |
157 | #define __NR_utimes 65 | 157 | #define __NR_utimes 65 |
@@ -339,8 +339,8 @@ __SYSCALL(148, sys_setpgid, 2) | |||
339 | __SYSCALL(149, sys_getpgid, 1) | 339 | __SYSCALL(149, sys_getpgid, 1) |
340 | #define __NR_getppid 150 | 340 | #define __NR_getppid 150 |
341 | __SYSCALL(150, sys_getppid, 0) | 341 | __SYSCALL(150, sys_getppid, 0) |
342 | #define __NR_available151 151 | 342 | #define __NR_getpgrp 151 |
343 | __SYSCALL(151, sys_ni_syscall, 0) | 343 | __SYSCALL(151, sys_getpgrp, 0) |
344 | 344 | ||
345 | #define __NR_reserved152 152 /* set_thread_area */ | 345 | #define __NR_reserved152 152 /* set_thread_area */ |
346 | __SYSCALL(152, sys_ni_syscall, 0) | 346 | __SYSCALL(152, sys_ni_syscall, 0) |
@@ -577,7 +577,112 @@ __SYSCALL(258, sys_keyctl, 5) | |||
577 | #define __NR_available259 259 | 577 | #define __NR_available259 259 |
578 | __SYSCALL(259, sys_ni_syscall, 0) | 578 | __SYSCALL(259, sys_ni_syscall, 0) |
579 | 579 | ||
580 | #define __NR_syscall_count 261 | 580 | |
581 | #define __NR_readahead 260 | ||
582 | __SYSCALL(260, sys_readahead, 5) | ||
583 | #define __NR_remap_file_pages 261 | ||
584 | __SYSCALL(261, sys_remap_file_pages, 5) | ||
585 | #define __NR_migrate_pages 262 | ||
586 | __SYSCALL(262, sys_migrate_pages, 0) | ||
587 | #define __NR_mbind 263 | ||
588 | __SYSCALL(263, sys_mbind, 6) | ||
589 | #define __NR_get_mempolicy 264 | ||
590 | __SYSCALL(264, sys_get_mempolicy, 5) | ||
591 | #define __NR_set_mempolicy 265 | ||
592 | __SYSCALL(265, sys_set_mempolicy, 3) | ||
593 | #define __NR_unshare 266 | ||
594 | __SYSCALL(266, sys_unshare, 1) | ||
595 | #define __NR_move_pages 267 | ||
596 | __SYSCALL(267, sys_move_pages, 0) | ||
597 | #define __NR_splice 268 | ||
598 | __SYSCALL(268, sys_splice, 0) | ||
599 | #define __NR_tee 269 | ||
600 | __SYSCALL(269, sys_tee, 0) | ||
601 | #define __NR_vmsplice 270 | ||
602 | __SYSCALL(270, sys_vmsplice, 0) | ||
603 | #define __NR_available271 271 | ||
604 | __SYSCALL(271, sys_ni_syscall, 0) | ||
605 | |||
606 | #define __NR_pselect6 272 | ||
607 | __SYSCALL(272, sys_pselect6, 0) | ||
608 | #define __NR_ppoll 273 | ||
609 | __SYSCALL(273, sys_ppoll, 0) | ||
610 | #define __NR_epoll_pwait 274 | ||
611 | __SYSCALL(274, sys_epoll_pwait, 0) | ||
612 | #define __NR_available275 275 | ||
613 | __SYSCALL(275, sys_ni_syscall, 0) | ||
614 | |||
615 | #define __NR_inotify_init 276 | ||
616 | __SYSCALL(276, sys_inotify_init, 0) | ||
617 | #define __NR_inotify_add_watch 277 | ||
618 | __SYSCALL(277, sys_inotify_add_watch, 3) | ||
619 | #define __NR_inotify_rm_watch 278 | ||
620 | __SYSCALL(278, sys_inotify_rm_watch, 2) | ||
621 | #define __NR_available279 279 | ||
622 | __SYSCALL(279, sys_ni_syscall, 0) | ||
623 | |||
624 | #define __NR_getcpu 280 | ||
625 | __SYSCALL(280, sys_getcpu, 0) | ||
626 | #define __NR_kexec_load 281 | ||
627 | __SYSCALL(281, sys_ni_syscall, 0) | ||
628 | |||
629 | #define __NR_ioprio_set 282 | ||
630 | __SYSCALL(282, sys_ioprio_set, 2) | ||
631 | #define __NR_ioprio_get 283 | ||
632 | __SYSCALL(283, sys_ioprio_get, 3) | ||
633 | |||
634 | #define __NR_set_robust_list 284 | ||
635 | __SYSCALL(284, sys_set_robust_list, 3) | ||
636 | #define __NR_get_robust_list 285 | ||
637 | __SYSCALL(285, sys_get_robust_list, 3) | ||
638 | #define __NR_reserved286 286 /* sync_file_rangeX */ | ||
639 | __SYSCALL(286, sys_ni_syscall, 3) | ||
640 | #define __NR_available287 287 | ||
641 | __SYSCALL(287, sys_faccessat, 0) | ||
642 | |||
643 | /* Relative File Operations */ | ||
644 | |||
645 | #define __NR_openat 288 | ||
646 | __SYSCALL(288, sys_openat, 4) | ||
647 | #define __NR_mkdirat 289 | ||
648 | __SYSCALL(289, sys_mkdirat, 3) | ||
649 | #define __NR_mknodat 290 | ||
650 | __SYSCALL(290, sys_mknodat, 4) | ||
651 | #define __NR_unlinkat 291 | ||
652 | __SYSCALL(291, sys_unlinkat, 3) | ||
653 | #define __NR_renameat 292 | ||
654 | __SYSCALL(292, sys_renameat, 4) | ||
655 | #define __NR_linkat 293 | ||
656 | __SYSCALL(293, sys_linkat, 5) | ||
657 | #define __NR_symlinkat 294 | ||
658 | __SYSCALL(294, sys_symlinkat, 3) | ||
659 | #define __NR_readlinkat 295 | ||
660 | __SYSCALL(295, sys_readlinkat, 4) | ||
661 | #define __NR_utimensat 296 | ||
662 | __SYSCALL(296, sys_utimensat, 0) | ||
663 | #define __NR_fchownat 297 | ||
664 | __SYSCALL(297, sys_fchownat, 5) | ||
665 | #define __NR_futimesat 298 | ||
666 | __SYSCALL(298, sys_futimesat, 4) | ||
667 | #define __NR_fstatat64 299 | ||
668 | __SYSCALL(299, sys_fstatat64, 0) | ||
669 | #define __NR_fchmodat 300 | ||
670 | __SYSCALL(300, sys_fchmodat, 4) | ||
671 | #define __NR_faccessat 301 | ||
672 | __SYSCALL(301, sys_faccessat, 4) | ||
673 | #define __NR_available302 302 | ||
674 | __SYSCALL(302, sys_ni_syscall, 0) | ||
675 | #define __NR_available303 303 | ||
676 | __SYSCALL(303, sys_ni_syscall, 0) | ||
677 | |||
678 | #define __NR_signalfd 304 | ||
679 | __SYSCALL(304, sys_signalfd, 3) | ||
680 | #define __NR_timerfd 305 | ||
681 | __SYSCALL(305, sys_timerfd, 4) | ||
682 | #define __NR_eventfd 306 | ||
683 | __SYSCALL(306, sys_eventfd, 1) | ||
684 | |||
685 | #define __NR_syscall_count 307 | ||
581 | 686 | ||
582 | /* | 687 | /* |
583 | * sysxtensa syscall handler | 688 | * sysxtensa syscall handler |
@@ -612,8 +717,19 @@ __SYSCALL(259, sys_ni_syscall, 0) | |||
612 | #define __ARCH_WANT_SYS_LLSEEK | 717 | #define __ARCH_WANT_SYS_LLSEEK |
613 | #define __ARCH_WANT_SYS_RT_SIGACTION | 718 | #define __ARCH_WANT_SYS_RT_SIGACTION |
614 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | 719 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND |
720 | #define __ARCH_WANT_SYS_GETPGRP | ||
615 | 721 | ||
616 | #endif /* __KERNEL__ */ | 722 | /* |
723 | * Ignore legacy system calls in the checksyscalls.sh script | ||
724 | */ | ||
617 | 725 | ||
618 | #endif /* _XTENSA_UNISTD_H */ | 726 | #define __IGNORE_fork /* use clone */ |
727 | #define __IGNORE_time | ||
728 | #define __IGNORE_alarm /* use setitimer */ | ||
729 | #define __IGNORE_pause | ||
730 | #define __IGNORE_mmap /* use mmap2 */ | ||
731 | #define __IGNORE_vfork /* use clone */ | ||
732 | #define __IGNORE_fadvise64 /* use fadvise64_64 */ | ||
619 | 733 | ||
734 | #endif /* __KERNEL__ */ | ||
735 | #endif /* _XTENSA_UNISTD_H */ | ||
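
The __SYSCALL(nr, entry, nargs) lines above form an X-macro list: this header expands them once for the __NR_* numbers, and the arch's syscall-table file is expected to redefine the macro to emit table initializers. A standalone sketch of that pattern with a made-up three-entry list (none of the names below are real kernel symbols):

    /* Illustrative X-macro pattern; the list and functions are invented. */
    #include <stdio.h>

    #define SYSCALL_LIST(X)        \
            X(0, demo_restart, 0)  \
            X(1, demo_exit,    1)  \
            X(2, demo_read,    3)

    static long demo_restart(void) { return 0; }
    static long demo_exit(void)    { return 1; }
    static long demo_read(void)    { return 2; }

    /* Expansion 1: syscall numbers, like the __NR_* defines above. */
    #define DEFINE_NR(nr, entry, nargs) __NR_##entry = (nr),
    enum { SYSCALL_LIST(DEFINE_NR) };
    #undef DEFINE_NR

    /* Expansion 2: the dispatch table built by the table file. */
    typedef long (*syscall_t)(void);
    #define TABLE_ENTRY(nr, entry, nargs) [nr] = entry,
    static syscall_t table[] = { SYSCALL_LIST(TABLE_ENTRY) };
    #undef TABLE_ENTRY

    int main(void)
    {
            printf("__NR_demo_read = %d -> %ld\n",
                   (int)__NR_demo_read, table[__NR_demo_read]());
            return 0;
    }
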
diff --git a/include/linux/isa.h b/include/linux/isa.h index 1b855335cb11..b0270e3814c8 100644 --- a/include/linux/isa.h +++ b/include/linux/isa.h | |||
@@ -22,7 +22,18 @@ struct isa_driver { | |||
22 | 22 | ||
23 | #define to_isa_driver(x) container_of((x), struct isa_driver, driver) | 23 | #define to_isa_driver(x) container_of((x), struct isa_driver, driver) |
24 | 24 | ||
25 | #ifdef CONFIG_ISA | ||
25 | int isa_register_driver(struct isa_driver *, unsigned int); | 26 | int isa_register_driver(struct isa_driver *, unsigned int); |
26 | void isa_unregister_driver(struct isa_driver *); | 27 | void isa_unregister_driver(struct isa_driver *); |
28 | #else | ||
29 | static inline int isa_register_driver(struct isa_driver *d, unsigned int i) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | static inline void isa_unregister_driver(struct isa_driver *d) | ||
35 | { | ||
36 | } | ||
37 | #endif | ||
27 | 38 | ||
28 | #endif /* __LINUX_ISA_H */ | 39 | #endif /* __LINUX_ISA_H */ |
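
With the !CONFIG_ISA stubs in place, a driver can register unconditionally and simply get 0 back when ISA support is compiled out. A hedged kernel-side sketch of such a caller; the driver name, device count and the bare .driver initializer are illustrative, only the two registration calls come from this header:

    /* Hedged sketch of an isa_register_driver()/isa_unregister_driver() user. */
    #include <linux/module.h>
    #include <linux/isa.h>

    static struct isa_driver foo_isa_driver = {     /* "foo" is a placeholder */
            .driver = {
                    .name = "foo",
            },
    };

    static int __init foo_init(void)
    {
            /* Returns 0 from the inline stub when CONFIG_ISA is not set. */
            return isa_register_driver(&foo_isa_driver, 4 /* max devices */);
    }

    static void __exit foo_exit(void)
    {
            isa_unregister_driver(&foo_isa_driver);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");
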
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5bdd656e88cf..a020eb2d4e2a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h | |||
@@ -159,7 +159,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p); | |||
159 | 159 | ||
160 | extern struct mempolicy default_policy; | 160 | extern struct mempolicy default_policy; |
161 | extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, | 161 | extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, |
162 | unsigned long addr, gfp_t gfp_flags); | 162 | unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol); |
163 | extern unsigned slab_node(struct mempolicy *policy); | 163 | extern unsigned slab_node(struct mempolicy *policy); |
164 | 164 | ||
165 | extern enum zone_type policy_zone; | 165 | extern enum zone_type policy_zone; |
@@ -256,7 +256,7 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p) | |||
256 | #define set_cpuset_being_rebound(x) do {} while (0) | 256 | #define set_cpuset_being_rebound(x) do {} while (0) |
257 | 257 | ||
258 | static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, | 258 | static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, |
259 | unsigned long addr, gfp_t gfp_flags) | 259 | unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol) |
260 | { | 260 | { |
261 | return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags); | 261 | return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags); |
262 | } | 262 | } |
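
The extra struct mempolicy ** parameter lets huge_zonelist() report which policy it actually consulted, so the caller can manage that policy's lifetime itself. A hedged sketch of the new call shape; only the prototype comes from the hunk above, the wrapper is invented:

    /* Hedged sketch of a caller using the new out-parameter. */
    #include <linux/mm.h>
    #include <linux/mempolicy.h>

    static struct zonelist *pick_hugepage_zonelist(struct vm_area_struct *vma,
                                                   unsigned long addr,
                                                   gfp_t gfp_mask)
    {
            struct mempolicy *mpol;
            struct zonelist *zl;

            /* The policy that was used comes back through &mpol, so the
             * caller can drop any reference to it once the allocation path
             * is finished. */
            zl = huge_zonelist(vma, addr, gfp_mask, &mpol);

            return zl;
    }
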
diff --git a/include/linux/sched.h b/include/linux/sched.h index f4e324ed2e44..5445eaec6908 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -593,7 +593,7 @@ struct user_struct { | |||
593 | #endif | 593 | #endif |
594 | 594 | ||
595 | /* Hash table maintenance information */ | 595 | /* Hash table maintenance information */ |
596 | struct list_head uidhash_list; | 596 | struct hlist_node uidhash_node; |
597 | uid_t uid; | 597 | uid_t uid; |
598 | }; | 598 | }; |
599 | 599 | ||
@@ -1472,6 +1472,7 @@ static inline struct user_struct *get_uid(struct user_struct *u) | |||
1472 | } | 1472 | } |
1473 | extern void free_uid(struct user_struct *); | 1473 | extern void free_uid(struct user_struct *); |
1474 | extern void switch_uid(struct user_struct *); | 1474 | extern void switch_uid(struct user_struct *); |
1475 | extern void release_uids(struct user_namespace *ns); | ||
1475 | 1476 | ||
1476 | #include <asm/current.h> | 1477 | #include <asm/current.h> |
1477 | 1478 | ||
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 93c27f71122a..a656cecd373c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -1352,6 +1352,22 @@ static inline int skb_clone_writable(struct sk_buff *skb, int len) | |||
1352 | skb_headroom(skb) + len <= skb->hdr_len; | 1352 | skb_headroom(skb) + len <= skb->hdr_len; |
1353 | } | 1353 | } |
1354 | 1354 | ||
1355 | static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, | ||
1356 | int cloned) | ||
1357 | { | ||
1358 | int delta = 0; | ||
1359 | |||
1360 | if (headroom < NET_SKB_PAD) | ||
1361 | headroom = NET_SKB_PAD; | ||
1362 | if (headroom > skb_headroom(skb)) | ||
1363 | delta = headroom - skb_headroom(skb); | ||
1364 | |||
1365 | if (delta || cloned) | ||
1366 | return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, | ||
1367 | GFP_ATOMIC); | ||
1368 | return 0; | ||
1369 | } | ||
1370 | |||
1355 | /** | 1371 | /** |
1356 | * skb_cow - copy header of skb when it is required | 1372 | * skb_cow - copy header of skb when it is required |
1357 | * @skb: buffer to cow | 1373 | * @skb: buffer to cow |
@@ -1366,16 +1382,22 @@ static inline int skb_clone_writable(struct sk_buff *skb, int len) | |||
1366 | */ | 1382 | */ |
1367 | static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) | 1383 | static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) |
1368 | { | 1384 | { |
1369 | int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) - | 1385 | return __skb_cow(skb, headroom, skb_cloned(skb)); |
1370 | skb_headroom(skb); | 1386 | } |
1371 | |||
1372 | if (delta < 0) | ||
1373 | delta = 0; | ||
1374 | 1387 | ||
1375 | if (delta || skb_cloned(skb)) | 1388 | /** |
1376 | return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) & | 1389 | * skb_cow_head - skb_cow but only making the head writable |
1377 | ~(NET_SKB_PAD-1), 0, GFP_ATOMIC); | 1390 | * @skb: buffer to cow |
1378 | return 0; | 1391 | * @headroom: needed headroom |
1392 | * | ||
1393 | * This function is identical to skb_cow except that we replace the | ||
1394 | * skb_cloned check by skb_header_cloned. It should be used when | ||
1395 | * you only need to push on some header and do not need to modify | ||
1396 | * the data. | ||
1397 | */ | ||
1398 | static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) | ||
1399 | { | ||
1400 | return __skb_cow(skb, headroom, skb_header_cloned(skb)); | ||
1379 | } | 1401 | } |
1380 | 1402 | ||
1381 | /** | 1403 | /** |
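
skb_cow_head() targets the common driver pattern of pushing a header in front of possibly shared payload. A hedged sketch of such a caller; HDR_LEN and the header fill are placeholders, skb_cow_head() is the call documented above and skb_push() is the standard helper for prepending:

    /* Hedged sketch: make room for and push a small header. */
    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    #define HDR_LEN 4   /* placeholder header size */

    static int push_demo_header(struct sk_buff *skb)
    {
            unsigned char *hdr;

            /* Only the header area needs to be private; cloned payload is fine. */
            if (skb_cow_head(skb, HDR_LEN))
                    return -ENOMEM;

            hdr = skb_push(skb, HDR_LEN);
            memset(hdr, 0, HDR_LEN);    /* real header bytes would go here */
            return 0;
    }
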
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 1101b0ce878f..b5f41d4c2eec 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | struct user_namespace { | 12 | struct user_namespace { |
13 | struct kref kref; | 13 | struct kref kref; |
14 | struct list_head uidhash_table[UIDHASH_SZ]; | 14 | struct hlist_head uidhash_table[UIDHASH_SZ]; |
15 | struct user_struct *root_user; | 15 | struct user_struct *root_user; |
16 | }; | 16 | }; |
17 | 17 | ||
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index d62847f846c2..17f8f3a2f0a3 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h | |||
@@ -337,6 +337,9 @@ void *priv; | |||
337 | struct class_device class_dev; /* sysfs */ | 337 | struct class_device class_dev; /* sysfs */ |
338 | }; | 338 | }; |
339 | 339 | ||
340 | /* Class-dev to video-device */ | ||
341 | #define to_video_device(cd) container_of(cd, struct video_device, class_dev) | ||
342 | |||
340 | /* Version 2 functions */ | 343 | /* Version 2 functions */ |
341 | extern int video_register_device(struct video_device *vfd, int type, int nr); | 344 | extern int video_register_device(struct video_device *vfd, int type, int nr); |
342 | void video_unregister_device(struct video_device *); | 345 | void video_unregister_device(struct video_device *); |
@@ -354,11 +357,9 @@ extern int video_usercopy(struct inode *inode, struct file *file, | |||
354 | int (*func)(struct inode *inode, struct file *file, | 357 | int (*func)(struct inode *inode, struct file *file, |
355 | unsigned int cmd, void *arg)); | 358 | unsigned int cmd, void *arg)); |
356 | 359 | ||
357 | |||
358 | #ifdef CONFIG_VIDEO_V4L1_COMPAT | 360 | #ifdef CONFIG_VIDEO_V4L1_COMPAT |
359 | #include <linux/mm.h> | 361 | #include <linux/mm.h> |
360 | 362 | ||
361 | #define to_video_device(cd) container_of(cd, struct video_device, class_dev) | ||
362 | static inline int __must_check | 363 | static inline int __must_check |
363 | video_device_create_file(struct video_device *vfd, | 364 | video_device_create_file(struct video_device *vfd, |
364 | struct class_device_attribute *attr) | 365 | struct class_device_attribute *attr) |
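
With to_video_device() hoisted out of the V4L1-compat block, any sysfs code holding the embedded class_dev can get back to its video_device. A tiny hedged sketch (it assumes video_device's name field, which is not shown in this hunk):

    /* Hedged sketch: recover the video_device from its class_device. */
    #include <media/v4l2-dev.h>

    static const char *vfd_name(struct class_device *cd)
    {
            struct video_device *vfd = to_video_device(cd);

            return vfd->name;   /* assumes the usual name[] member */
    }
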
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index d529045c1679..c9cc00c85782 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -123,6 +123,7 @@ | |||
123 | * sctp/protocol.c | 123 | * sctp/protocol.c |
124 | */ | 124 | */ |
125 | extern struct sock *sctp_get_ctl_sock(void); | 125 | extern struct sock *sctp_get_ctl_sock(void); |
126 | extern void sctp_local_addr_free(struct rcu_head *head); | ||
126 | extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, | 127 | extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, |
127 | sctp_scope_t, gfp_t gfp, | 128 | sctp_scope_t, gfp_t gfp, |
128 | int flags); | 129 | int flags); |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index c0d5848c33dc..c2fe2dcc9afc 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -207,6 +207,9 @@ extern struct sctp_globals { | |||
207 | * It is a list of sctp_sockaddr_entry. | 207 | * It is a list of sctp_sockaddr_entry. |
208 | */ | 208 | */ |
209 | struct list_head local_addr_list; | 209 | struct list_head local_addr_list; |
210 | |||
211 | /* Lock that protects the local_addr_list writers */ | ||
212 | spinlock_t addr_list_lock; | ||
210 | 213 | ||
211 | /* Flag to indicate if addip is enabled. */ | 214 | /* Flag to indicate if addip is enabled. */ |
212 | int addip_enable; | 215 | int addip_enable; |
@@ -242,6 +245,7 @@ extern struct sctp_globals { | |||
242 | #define sctp_port_alloc_lock (sctp_globals.port_alloc_lock) | 245 | #define sctp_port_alloc_lock (sctp_globals.port_alloc_lock) |
243 | #define sctp_port_hashtable (sctp_globals.port_hashtable) | 246 | #define sctp_port_hashtable (sctp_globals.port_hashtable) |
244 | #define sctp_local_addr_list (sctp_globals.local_addr_list) | 247 | #define sctp_local_addr_list (sctp_globals.local_addr_list) |
248 | #define sctp_local_addr_lock (sctp_globals.addr_list_lock) | ||
245 | #define sctp_addip_enable (sctp_globals.addip_enable) | 249 | #define sctp_addip_enable (sctp_globals.addip_enable) |
246 | #define sctp_prsctp_enable (sctp_globals.prsctp_enable) | 250 | #define sctp_prsctp_enable (sctp_globals.prsctp_enable) |
247 | 251 | ||
@@ -737,8 +741,10 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); | |||
737 | /* This is a structure for holding either an IPv6 or an IPv4 address. */ | 741 | /* This is a structure for holding either an IPv6 or an IPv4 address. */ |
738 | struct sctp_sockaddr_entry { | 742 | struct sctp_sockaddr_entry { |
739 | struct list_head list; | 743 | struct list_head list; |
744 | struct rcu_head rcu; | ||
740 | union sctp_addr a; | 745 | union sctp_addr a; |
741 | __u8 use_as_src; | 746 | __u8 use_as_src; |
747 | __u8 valid; | ||
742 | }; | 748 | }; |
743 | 749 | ||
744 | typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); | 750 | typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); |
@@ -1149,7 +1155,9 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest, | |||
1149 | int flags); | 1155 | int flags); |
1150 | int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, | 1156 | int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, |
1151 | __u8 use_as_src, gfp_t gfp); | 1157 | __u8 use_as_src, gfp_t gfp); |
1152 | int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); | 1158 | int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *, |
1159 | void (*rcu_call)(struct rcu_head *, | ||
1160 | void (*func)(struct rcu_head *))); | ||
1153 | int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, | 1161 | int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, |
1154 | struct sctp_sock *); | 1162 | struct sctp_sock *); |
1155 | union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, | 1163 | union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, |
@@ -1220,9 +1228,6 @@ struct sctp_ep_common { | |||
1220 | * bind_addr.address_list is our set of local IP addresses. | 1228 | * bind_addr.address_list is our set of local IP addresses. |
1221 | */ | 1229 | */ |
1222 | struct sctp_bind_addr bind_addr; | 1230 | struct sctp_bind_addr bind_addr; |
1223 | |||
1224 | /* Protection during address list comparisons. */ | ||
1225 | rwlock_t addr_lock; | ||
1226 | }; | 1231 | }; |
1227 | 1232 | ||
1228 | 1233 | ||
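
Taken together, the new rcu_head, valid flag, addr_list_lock and sctp_local_addr_free() point at an RCU conversion of the address lists: writers serialize on the spinlock, mark an entry invalid, unlink it and defer the free, while readers walk the list under rcu_read_lock(). A hedged sketch of the writer side; only the members and helpers named in the hunks are assumed, the surrounding logic is illustrative:

    /* Hedged sketch of removing one local address under the new scheme. */
    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <net/sctp/sctp.h>

    static void demo_remove_local_addr(struct sctp_sockaddr_entry *addr)
    {
            spin_lock_bh(&sctp_local_addr_lock);
            addr->valid = 0;              /* readers skip entries marked invalid */
            list_del_rcu(&addr->list);    /* unlink; readers may still see it */
            spin_unlock_bh(&sctp_local_addr_lock);

            /* Free only after all current RCU readers have finished. */
            call_rcu(&addr->rcu, sctp_local_addr_free);
    }
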