author     Ralf Baechle <ralf@linux-mips.org>   2015-04-13 10:03:32 -0400
committer  Ralf Baechle <ralf@linux-mips.org>   2015-04-13 10:03:32 -0400
commit     3e20a26b02bd4f24945c87407df51948dd488620 (patch)
tree       f466d3b2a47a98ec2910724e17ee2f3a93c1a49e /arch/mips/include/asm
parent     98b0429b7abd5c05efdb23f3eba02ec3f696748e (diff)
parent     5306a5450824691e27d68f711758515debedeeac (diff)

Merge branch '4.0-fixes' into mips-for-linux-next
Diffstat (limited to 'arch/mips/include/asm')

-rw-r--r--  arch/mips/include/asm/asm-eva.h                                   137
-rw-r--r--  arch/mips/include/asm/cacheflush.h                                 38
-rw-r--r--  arch/mips/include/asm/cpu-features.h                               36
-rw-r--r--  arch/mips/include/asm/cpu.h                                         3
-rw-r--r--  arch/mips/include/asm/elf.h                                         5
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h    1
-rw-r--r--  arch/mips/include/asm/octeon/pci-octeon.h                           3
-rw-r--r--  arch/mips/include/asm/pgtable-32.h                                 15
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h                               96
-rw-r--r--  arch/mips/include/asm/pgtable.h                                    83
-rw-r--r--  arch/mips/include/asm/r4kcache.h                                   89
-rw-r--r--  arch/mips/include/asm/spinlock.h                                    2

12 files changed, 317 insertions(+), 191 deletions(-)
diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
index e41c56e375b1..1e38f0e1ea3e 100644
--- a/arch/mips/include/asm/asm-eva.h
+++ b/arch/mips/include/asm/asm-eva.h
@@ -11,6 +11,36 @@
 #define __ASM_ASM_EVA_H
 
 #ifndef __ASSEMBLY__
+
+/* Kernel variants */
+
+#define kernel_cache(op, base) "cache " op ", " base "\n"
+#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
+#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
+#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
+#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
+#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
+#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
+#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
+#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
+#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
+#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
+#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
+#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
+#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr) user_sw(reg, addr)
+#define kernel_ld(reg, addr) user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
+#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
+#endif /* CONFIG_32BIT */
+
 #ifdef CONFIG_EVA
 
 #define __BUILD_EVA_INSN(insn, reg, addr) \
@@ -41,37 +71,60 @@
 
 #else
 
-#define user_cache(op, base) "cache " op ", " base "\n"
-#define user_ll(reg, addr) "ll " reg ", " addr "\n"
-#define user_sc(reg, addr) "sc " reg ", " addr "\n"
-#define user_lw(reg, addr) "lw " reg ", " addr "\n"
-#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
-#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
-#define user_lh(reg, addr) "lh " reg ", " addr "\n"
-#define user_lb(reg, addr) "lb " reg ", " addr "\n"
-#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
-#define user_sw(reg, addr) "sw " reg ", " addr "\n"
-#define user_swl(reg, addr) "swl " reg ", " addr "\n"
-#define user_swr(reg, addr) "swr " reg ", " addr "\n"
-#define user_sh(reg, addr) "sh " reg ", " addr "\n"
-#define user_sb(reg, addr) "sb " reg ", " addr "\n"
+#define user_cache(op, base) kernel_cache(op, base)
+#define user_ll(reg, addr) kernel_ll(reg, addr)
+#define user_sc(reg, addr) kernel_sc(reg, addr)
+#define user_lw(reg, addr) kernel_lw(reg, addr)
+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
+#define user_lh(reg, addr) kernel_lh(reg, addr)
+#define user_lb(reg, addr) kernel_lb(reg, addr)
+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
+#define user_sw(reg, addr) kernel_sw(reg, addr)
+#define user_swl(reg, addr) kernel_swl(reg, addr)
+#define user_swr(reg, addr) kernel_swr(reg, addr)
+#define user_sh(reg, addr) kernel_sh(reg, addr)
+#define user_sb(reg, addr) kernel_sb(reg, addr)
 
 #ifdef CONFIG_32BIT
-/*
- * No 'sd' or 'ld' instructions in 32-bit but the code will
- * do the correct thing
- */
-#define user_sd(reg, addr) user_sw(reg, addr)
-#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sd(reg, addr) kernel_sw(reg, addr)
+#define user_ld(reg, addr) kernel_lw(reg, addr)
 #else
-#define user_sd(reg, addr) "sd " reg", " addr "\n"
-#define user_ld(reg, addr) "ld " reg", " addr "\n"
+#define user_sd(reg, addr) kernel_sd(reg, addr)
+#define user_ld(reg, addr) kernel_ld(reg, addr)
 #endif /* CONFIG_32BIT */
 
 #endif /* CONFIG_EVA */
 
 #else /* __ASSEMBLY__ */
 
+#define kernel_cache(op, base) cache op, base
+#define kernel_ll(reg, addr) ll reg, addr
+#define kernel_sc(reg, addr) sc reg, addr
+#define kernel_lw(reg, addr) lw reg, addr
+#define kernel_lwl(reg, addr) lwl reg, addr
+#define kernel_lwr(reg, addr) lwr reg, addr
+#define kernel_lh(reg, addr) lh reg, addr
+#define kernel_lb(reg, addr) lb reg, addr
+#define kernel_lbu(reg, addr) lbu reg, addr
+#define kernel_sw(reg, addr) sw reg, addr
+#define kernel_swl(reg, addr) swl reg, addr
+#define kernel_swr(reg, addr) swr reg, addr
+#define kernel_sh(reg, addr) sh reg, addr
+#define kernel_sb(reg, addr) sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr) user_sw(reg, addr)
+#define kernel_ld(reg, addr) user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr) sd reg, addr
+#define kernel_ld(reg, addr) ld reg, addr
+#endif /* CONFIG_32BIT */
+
 #ifdef CONFIG_EVA
 
 #define __BUILD_EVA_INSN(insn, reg, addr) \
@@ -101,31 +154,27 @@
 #define user_sd(reg, addr) user_sw(reg, addr)
 #else
 
-#define user_cache(op, base) cache op, base
-#define user_ll(reg, addr) ll reg, addr
-#define user_sc(reg, addr) sc reg, addr
-#define user_lw(reg, addr) lw reg, addr
-#define user_lwl(reg, addr) lwl reg, addr
-#define user_lwr(reg, addr) lwr reg, addr
-#define user_lh(reg, addr) lh reg, addr
-#define user_lb(reg, addr) lb reg, addr
-#define user_lbu(reg, addr) lbu reg, addr
-#define user_sw(reg, addr) sw reg, addr
-#define user_swl(reg, addr) swl reg, addr
-#define user_swr(reg, addr) swr reg, addr
-#define user_sh(reg, addr) sh reg, addr
-#define user_sb(reg, addr) sb reg, addr
+#define user_cache(op, base) kernel_cache(op, base)
+#define user_ll(reg, addr) kernel_ll(reg, addr)
+#define user_sc(reg, addr) kernel_sc(reg, addr)
+#define user_lw(reg, addr) kernel_lw(reg, addr)
+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
+#define user_lh(reg, addr) kernel_lh(reg, addr)
+#define user_lb(reg, addr) kernel_lb(reg, addr)
+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
+#define user_sw(reg, addr) kernel_sw(reg, addr)
+#define user_swl(reg, addr) kernel_swl(reg, addr)
+#define user_swr(reg, addr) kernel_swr(reg, addr)
+#define user_sh(reg, addr) kernel_sh(reg, addr)
+#define user_sb(reg, addr) kernel_sb(reg, addr)
 
 #ifdef CONFIG_32BIT
-/*
- * No 'sd' or 'ld' instructions in 32-bit but the code will
- * do the correct thing
- */
-#define user_sd(reg, addr) user_sw(reg, addr)
-#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sd(reg, addr) kernel_sw(reg, addr)
+#define user_ld(reg, addr) kernel_lw(reg, addr)
 #else
-#define user_sd(reg, addr) sd reg, addr
-#define user_ld(reg, addr) ld reg, addr
+#define user_sd(reg, addr) kernel_sd(reg, addr)
+#define user_ld(reg, addr) kernel_sd(reg, addr)
 #endif /* CONFIG_32BIT */
 
 #endif /* CONFIG_EVA */
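As context for the asm-eva.h hunks above: the C-language kernel_*()/user_*() helpers expand to instruction strings that are pasted into inline assembly. The function below is a hypothetical sketch written for this note only (it is not part of the merge) and simply demonstrates how one of the new macros could be consumed:

/*
 * Hypothetical sketch only: kernel_lw("%0", "0(%1)") expands to the
 * string "lw %0, 0(%1)\n", so the macro merely builds the text of the
 * instruction for an inline asm block.
 */
static inline unsigned int sketch_load_word(const unsigned int *p)
{
        unsigned int val;

        __asm__ __volatile__(
                kernel_lw("%0", "0(%1)")
                : "=r" (val)
                : "r" (p));
        return val;
}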
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index e08381a37f8b..723229f4cf27 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -29,6 +29,20 @@
  * - flush_icache_all() flush the entire instruction cache
  * - flush_data_cache_page() flushes a page from the data cache
  */
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte
+ * is dirty and requires cleaning before returning it to the user.
+ */
+#define PG_dcache_dirty PG_arch_1
+
+#define Page_dcache_dirty(page) \
+        test_bit(PG_dcache_dirty, &(page)->flags)
+#define SetPageDcacheDirty(page) \
+        set_bit(PG_dcache_dirty, &(page)->flags)
+#define ClearPageDcacheDirty(page) \
+        clear_bit(PG_dcache_dirty, &(page)->flags)
+
 extern void (*flush_cache_all)(void);
 extern void (*__flush_cache_all)(void);
 extern void (*flush_cache_mm)(struct mm_struct *mm);
@@ -37,13 +51,15 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
         unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
 extern void __flush_dcache_page(struct page *page);
+extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 static inline void flush_dcache_page(struct page *page)
 {
-        if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
+        if (cpu_has_dc_aliases)
                 __flush_dcache_page(page);
-
+        else if (!cpu_has_ic_fills_f_dc)
+                SetPageDcacheDirty(page);
 }
 
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
@@ -61,6 +77,11 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 static inline void flush_icache_page(struct vm_area_struct *vma,
         struct page *page)
 {
+        if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
+            Page_dcache_dirty(page)) {
+                __flush_icache_page(vma, page);
+                ClearPageDcacheDirty(page);
+        }
 }
 
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
@@ -95,19 +116,6 @@ extern void (*flush_icache_all)(void);
 extern void (*local_flush_data_cache_page)(void * addr);
 extern void (*flush_data_cache_page)(unsigned long addr);
 
-/*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
- */
-#define PG_dcache_dirty PG_arch_1
-
-#define Page_dcache_dirty(page) \
-        test_bit(PG_dcache_dirty, &(page)->flags)
-#define SetPageDcacheDirty(page) \
-        set_bit(PG_dcache_dirty, &(page)->flags)
-#define ClearPageDcacheDirty(page) \
-        clear_bit(PG_dcache_dirty, &(page)->flags)
-
 /* Run kernel code uncached, useful for cache probing functions. */
 unsigned long run_uncached(void *func);
 
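The cacheflush.h hunks above implement lazy D-cache/I-cache synchronisation: flush_dcache_page() only writes back immediately when the D-cache has aliases, otherwise it just sets PG_dcache_dirty, and flush_icache_page() performs the deferred writeback once a dirty page is mapped executable. A hedged sketch of the intended call pattern follows; the helper name is invented for illustration and assumes the usual <linux/mm.h>, <linux/string.h> and <asm/cacheflush.h> includes:

/*
 * Illustration only, not from this commit: code is copied into a page,
 * then the two hooks cooperate so the cache maintenance happens at most
 * once, and only for pages that are actually mapped with VM_EXEC.
 */
static void sketch_install_code(struct vm_area_struct *vma, struct page *page,
                                void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        flush_dcache_page(page);        /* may only mark PG_dcache_dirty */
        flush_icache_page(vma, page);   /* flushes only if VM_EXEC and dirty */
}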
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index fc2ad332541c..5aeaf19c26b0 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -140,6 +140,9 @@
 # endif
 #endif
 
+#ifndef cpu_has_xpa
+#define cpu_has_xpa (cpu_data[0].options & MIPS_CPU_XPA)
+#endif
 #ifndef cpu_has_vtag_icache
 #define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
 #endif
@@ -239,8 +242,39 @@
 /* MIPSR2 and MIPSR6 have a lot of similarities */
 #define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6)
 
+/*
+ * cpu_has_mips_r2_exec_hazard - return if IHB is required on current processor
+ *
+ * Returns non-zero value if the current processor implementation requires
+ * an IHB instruction to deal with an instruction hazard as per MIPS R2
+ * architecture specification, zero otherwise.
+ */
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
+#define cpu_has_mips_r2_exec_hazard \
+({ \
+        int __res; \
+        \
+        switch (current_cpu_type()) { \
+        case CPU_M14KC: \
+        case CPU_74K: \
+        case CPU_1074K: \
+        case CPU_PROAPTIV: \
+        case CPU_P5600: \
+        case CPU_M5150: \
+        case CPU_QEMU_GENERIC: \
+        case CPU_CAVIUM_OCTEON: \
+        case CPU_CAVIUM_OCTEON_PLUS: \
+        case CPU_CAVIUM_OCTEON2: \
+        case CPU_CAVIUM_OCTEON3: \
+                __res = 0; \
+                break; \
+        \
+        default: \
+                __res = 1; \
+        } \
+        \
+        __res; \
+})
 #endif
 
 /*
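The rework above turns cpu_has_mips_r2_exec_hazard from a blanket MIPS R2/R6 test into a whitelist of cores known not to require an IHB. As a hypothetical consumer (the wrapper below is invented for illustration; instruction_hazard() is assumed to be the existing helper from <asm/hazards.h>):

/*
 * Illustrative sketch only: skip hazard-barrier work entirely on cores
 * that the switch statement above reports as not needing an IHB.
 */
static inline void sketch_clear_exec_hazard(void)
{
        if (!cpu_has_mips_r2_exec_hazard)
                return;                 /* e.g. OCTEON, proAptiv, P5600 */
        instruction_hazard();           /* assumed helper from <asm/hazards.h> */
}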
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index fd2e893e9d9f..e3adca1d0b99 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -377,7 +377,8 @@ enum cpu_type_enum {
 #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */
 #define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */
-#define MIPS_CPU_CDMM 0x2000000000ull /* CPU has Common Device Memory Map */
+#define MIPS_CPU_XPA 0x2000000000ull /* CPU supports Extended Physical Addressing */
+#define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 612cf519bd88..9a74248e7821 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -297,6 +297,9 @@ do { \
         if (personality(current->personality) != PER_LINUX) \
                 set_personality(PER_LINUX); \
         \
+        clear_thread_flag(TIF_HYBRID_FPREGS); \
+        set_thread_flag(TIF_32BIT_FPREGS); \
+        \
         mips_set_personality_fp(state); \
         \
         current->thread.abi = &mips_abi; \
@@ -324,6 +327,8 @@ do { \
 do { \
         set_thread_flag(TIF_32BIT_REGS); \
         set_thread_flag(TIF_32BIT_ADDR); \
+        clear_thread_flag(TIF_HYBRID_FPREGS); \
+        set_thread_flag(TIF_32BIT_FPREGS); \
         \
         mips_set_personality_fp(state); \
         \
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index fa1f3cfbae8d..d68e685cde60 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -50,7 +50,6 @@
 #define cpu_has_mips32r2 0
 #define cpu_has_mips64r1 0
 #define cpu_has_mips64r2 1
-#define cpu_has_mips_r2_exec_hazard 0
 #define cpu_has_dsp 0
 #define cpu_has_dsp2 0
 #define cpu_has_mipsmt 0
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index 64ba56a02843..1884609741a8 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -11,9 +11,6 @@
 
 #include <linux/pci.h>
 
-/* Some PCI cards require delays when accessing config space. */
-#define PCI_CONFIG_SPACE_DELAY 10000
-
 /*
  * The physical memory base mapped by BAR1. 256MB at the end of the
  * first 4GB.
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index a6be006b6f75..7d56686c0e62 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -105,13 +105,16 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
+#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
 static inline pte_t
 pfn_pte(unsigned long pfn, pgprot_t prot)
 {
         pte_t pte;
-        pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
-        pte.pte_low = pgprot_val(prot);
+
+        pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
+                (pgprot_val(prot) & ~_PFNX_MASK);
+        pte.pte_high = (pfn << _PFN_SHIFT) |
+                (pgprot_val(prot) & ~_PFN_MASK);
         return pte;
 }
 
@@ -166,9 +169,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
-#define __swp_type(x) (((x).val >> 2) & 0x1f)
-#define __swp_offset(x) ((x).val >> 7)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __swp_type(x) (((x).val >> 4) & 0x1f)
+#define __swp_offset(x) ((x).val >> 9)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
 #define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
 
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 91747c282bb3..18ae5ddef118 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -37,7 +37,11 @@
 /*
  * The following bits are implemented by the TLB hardware
  */
-#define _PAGE_GLOBAL_SHIFT 0
+#define _PAGE_NO_EXEC_SHIFT 0
+#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
+#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
 #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
@@ -49,7 +53,7 @@
 /*
  * The following bits are implemented in software
  */
-#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3)
+#define _PAGE_PRESENT_SHIFT (24)
 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
 #define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_READ (1 << _PAGE_READ_SHIFT)
@@ -62,6 +66,11 @@
 
 #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
+/*
+ * Bits for extended EntryLo0/EntryLo1 registers
+ */
+#define _PFNX_MASK 0xffffff
+
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /*
@@ -95,11 +104,7 @@
 
 #else
 /*
- * When using the RI/XI bit support, we have 13 bits of flags below
- * the physical address. The RI/XI bits are placed such that a SRL 5
- * can strip off the software bits, then a ROTR 2 can move the RI/XI
- * into bits [63:62]. This also limits physical address to 56 bits,
- * which is more than we need right now.
+ * Below are the "Normal" R4K cases
  */
 
 /*
@@ -107,38 +112,59 @@
  */
 #define _PAGE_PRESENT_SHIFT 0
 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
+/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
+#ifdef CONFIG_CPU_MIPSR2
+#define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#else
+#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
 #define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
 #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#endif
 #define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
 #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
 #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
 #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
 
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-/* huge tlb page */
+#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+/* Huge TLB page */
 #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
+
+/* Only R2 or newer cores have the XI bit */
+#ifdef CONFIG_CPU_MIPSR2
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
 #else
-#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
-#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
-#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
-#endif
+#define _PAGE_GLOBAL_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#endif /* CONFIG_CPU_MIPSR2 */
 
-/* Page cannot be executed */
-#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT)
-#define _PAGE_NO_EXEC ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; })
+#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
-/* Page cannot be read */
-#define _PAGE_NO_READ_SHIFT (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
-#define _PAGE_NO_READ ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })
+#ifdef CONFIG_CPU_MIPSR2
+/* XI - page cannot be executed */
+#ifndef _PAGE_NO_EXEC_SHIFT
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#endif
+#define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
+
+/* RI - page cannot be read */
+#define _PAGE_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
+#define _PAGE_NO_READ_SHIFT _PAGE_READ_SHIFT
+#define _PAGE_NO_READ (cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
 
 #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+
+#else /* !CONFIG_CPU_MIPSR2 */
+#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#endif /* CONFIG_CPU_MIPSR2 */
+
 #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
 #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
@@ -150,18 +176,26 @@
 
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
 
+#ifndef _PAGE_NO_EXEC
+#define _PAGE_NO_EXEC 0
+#endif
+#ifndef _PAGE_NO_READ
+#define _PAGE_NO_READ 0
+#endif
+
 #define _PAGE_SILENT_READ _PAGE_VALID
 #define _PAGE_SILENT_WRITE _PAGE_DIRTY
 
 #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
 
-#ifndef _PAGE_NO_READ
-#define _PAGE_NO_READ ({BUG(); 0; })
-#define _PAGE_NO_READ_SHIFT ({BUG(); 0; })
-#endif
-#ifndef _PAGE_NO_EXEC
-#define _PAGE_NO_EXEC ({BUG(); 0; })
-#endif
+/*
+ * The final layouts of the PTE bits are:
+ *
+ * 64-bit, R1 or earlier: CCC D V G [S H] M A W R P
+ * 32-bit, R1 or earlier: CCC D V G M A W R P
+ * 64-bit, R2 or later:   CCC D V G RI/R XI [S H] M A W P
+ * 32-bit, R2 or later:   CCC D V G RI/R XI M A W P
+ */
 
 
 #ifndef __ASSEMBLY__
@@ -171,6 +205,7 @@
  */
 static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 {
+#ifdef CONFIG_CPU_MIPSR2
         if (cpu_has_rixi) {
                 int sa;
 #ifdef CONFIG_32BIT
@@ -186,6 +221,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
                 return (pte_val >> _PAGE_GLOBAL_SHIFT) |
                         ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
         }
+#endif
 
         return pte_val >> _PAGE_GLOBAL_SHIFT;
 }
@@ -245,7 +281,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 #define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
 #endif
 
-#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
+#define __READABLE (_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
 #define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
 
 #define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index bef782c4a44b..819af9d057a8 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -24,17 +24,17 @@ struct mm_struct;
 struct vm_area_struct;
 
 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
         _page_cachable_default)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
-        (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
+        _page_cachable_default)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \
         _page_cachable_default)
 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
         _PAGE_GLOBAL | _page_cachable_default)
 #define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
         _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
+#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
         _page_cachable_default)
 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
         __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
@@ -127,13 +127,9 @@ do { \
         } \
 } while(0)
 
-
-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-        pte_t pteval);
-
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
-#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
 #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
@@ -142,18 +138,17 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
         smp_wmb();
         ptep->pte_low = pte.pte_low;
 
-        if (pte.pte_low & _PAGE_GLOBAL) {
+        if (pte.pte_high & _PAGE_GLOBAL) {
                 pte_t *buddy = ptep_buddy(ptep);
                 /*
                  * Make sure the buddy is global too (if it's !none,
                  * it better already be global)
                  */
-                if (pte_none(*buddy)) {
-                        buddy->pte_low |= _PAGE_GLOBAL;
+                if (pte_none(*buddy))
                         buddy->pte_high |= _PAGE_GLOBAL;
-                }
         }
 }
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -161,8 +156,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 
         htw_stop();
         /* Preserve global status for the pair */
-        if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
-                null.pte_low = null.pte_high = _PAGE_GLOBAL;
+        if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+                null.pte_high = _PAGE_GLOBAL;
 
         set_pte_at(mm, addr, ptep, null);
         htw_start();
@@ -192,6 +187,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
         }
 #endif
 }
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -242,21 +238,21 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-        pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+        pte.pte_low &= ~_PAGE_WRITE;
         pte.pte_high &= ~_PAGE_SILENT_WRITE;
         return pte;
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-        pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+        pte.pte_low &= ~_PAGE_MODIFIED;
         pte.pte_high &= ~_PAGE_SILENT_WRITE;
         return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-        pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
+        pte.pte_low &= ~_PAGE_ACCESSED;
         pte.pte_high &= ~_PAGE_SILENT_READ;
         return pte;
 }
@@ -264,30 +260,24 @@ static inline pte_t pte_mkold(pte_t pte)
 static inline pte_t pte_mkwrite(pte_t pte)
 {
         pte.pte_low |= _PAGE_WRITE;
-        if (pte.pte_low & _PAGE_MODIFIED) {
-                pte.pte_low |= _PAGE_SILENT_WRITE;
+        if (pte.pte_low & _PAGE_MODIFIED)
                 pte.pte_high |= _PAGE_SILENT_WRITE;
-        }
         return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
         pte.pte_low |= _PAGE_MODIFIED;
-        if (pte.pte_low & _PAGE_WRITE) {
-                pte.pte_low |= _PAGE_SILENT_WRITE;
+        if (pte.pte_low & _PAGE_WRITE)
                 pte.pte_high |= _PAGE_SILENT_WRITE;
-        }
         return pte;
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
         pte.pte_low |= _PAGE_ACCESSED;
-        if (pte.pte_low & _PAGE_READ) {
-                pte.pte_low |= _PAGE_SILENT_READ;
+        if (pte.pte_low & _PAGE_READ)
                 pte.pte_high |= _PAGE_SILENT_READ;
-        }
         return pte;
 }
 #else
@@ -332,13 +322,13 @@ static inline pte_t pte_mkdirty(pte_t pte)
 static inline pte_t pte_mkyoung(pte_t pte)
 {
         pte_val(pte) |= _PAGE_ACCESSED;
-        if (cpu_has_rixi) {
+#ifdef CONFIG_CPU_MIPSR2
         if (!(pte_val(pte) & _PAGE_NO_READ))
                 pte_val(pte) |= _PAGE_SILENT_READ;
-        } else {
-                if (pte_val(pte) & _PAGE_READ)
-                        pte_val(pte) |= _PAGE_SILENT_READ;
-        }
+        else
+#endif
+        if (pte_val(pte) & _PAGE_READ)
+                pte_val(pte) |= _PAGE_SILENT_READ;
         return pte;
 }
 
@@ -391,10 +381,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-        pte.pte_low &= _PAGE_CHG_MASK;
+        pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
         pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
-        pte.pte_low |= pgprot_val(newprot);
-        pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+        pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
+        pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
         return pte;
 }
 #else
@@ -407,12 +397,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
         pte_t pte);
+extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
+        pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
         unsigned long address, pte_t *ptep)
 {
         pte_t pte = *ptep;
         __update_tlb(vma, address, pte);
+        __update_cache(vma, address, pte);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
@@ -534,13 +527,13 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
         pmd_val(pmd) |= _PAGE_ACCESSED;
 
-        if (cpu_has_rixi) {
+#ifdef CONFIG_CPU_MIPSR2
         if (!(pmd_val(pmd) & _PAGE_NO_READ))
                 pmd_val(pmd) |= _PAGE_SILENT_READ;
-        } else {
-                if (pmd_val(pmd) & _PAGE_READ)
-                        pmd_val(pmd) |= _PAGE_SILENT_READ;
-        }
+        else
+#endif
+        if (pmd_val(pmd) & _PAGE_READ)
+                pmd_val(pmd) |= _PAGE_SILENT_READ;
 
         return pmd;
 }
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 1b22d2da88a1..38902bf97adc 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -12,6 +12,8 @@
 #ifndef _ASM_R4KCACHE_H
 #define _ASM_R4KCACHE_H
 
+#include <linux/stringify.h>
+
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 #include <asm/compiler.h>
@@ -344,7 +346,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
         " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
         " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
         " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
-        " addiu $1, $0, 0x100 \n" \
+        " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
         " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
         " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
@@ -368,17 +370,17 @@ static inline void invalidate_tcache_page(unsigned long addr)
         " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
         " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
         " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
         " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
         " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
-        " addiu $1, $1, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
         " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
         " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
-        " addiu $1, $1, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
         " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
         " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
@@ -396,25 +398,25 @@ static inline void invalidate_tcache_page(unsigned long addr)
         " .set noat\n" \
         " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
         " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
         " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
         " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
         " .set pop\n" \
@@ -429,39 +431,38 @@ static inline void invalidate_tcache_page(unsigned long addr)
         " .set mips64r6\n" \
         " .set noat\n" \
         " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
-        " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
-        " addiu $1, %0, 0x100\n" \
+        " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
+        " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
+        " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
         " .set pop\n" \
         : \
         : "r" (base), \
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index b4548690ade9..1fca2e0793dc 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -263,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
         if (R10000_LLSC_WAR) {
                 __asm__ __volatile__(
                 "1: ll %1, %2 # arch_read_unlock \n"
-                " addiu %1, 1 \n"
+                " addiu %1, -1 \n"
                 " sc %1, %0 \n"
                 " beqzl %1, 1b \n"
                 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)