Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile       |   6
-rw-r--r--  arch/mips/mm/c-octeon.c     |   2
-rw-r--r--  arch/mips/mm/c-r3k.c        |   8
-rw-r--r--  arch/mips/mm/c-r4k.c        |  26
-rw-r--r--  arch/mips/mm/c-tx39.c       |  12
-rw-r--r--  arch/mips/mm/cerr-sb1.c     |  30
-rw-r--r--  arch/mips/mm/cex-gen.S      |   6
-rw-r--r--  arch/mips/mm/cex-oct.S      |  36
-rw-r--r--  arch/mips/mm/cex-sb1.S      |   8
-rw-r--r--  arch/mips/mm/dma-default.c  |   2
-rw-r--r--  arch/mips/mm/fault.c        |   4
-rw-r--r--  arch/mips/mm/gup.c          |   2
-rw-r--r--  arch/mips/mm/init.c         |   4
-rw-r--r--  arch/mips/mm/ioremap.c      |   4
-rw-r--r--  arch/mips/mm/page.c         |   6
-rw-r--r--  arch/mips/mm/pgtable-64.c   |   4
-rw-r--r--  arch/mips/mm/sc-ip22.c      |   2
-rw-r--r--  arch/mips/mm/sc-r5k.c       |   4
-rw-r--r--  arch/mips/mm/tlb-r4k.c      |   2
-rw-r--r--  arch/mips/mm/tlbex.c        |  32
-rw-r--r--  arch/mips/mm/uasm.c         |  16
21 files changed, 108 insertions(+), 108 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 90ceb963aaf1..1dcec30ad1c4 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -16,9 +16,9 @@ obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
 obj-$(CONFIG_CPU_R8000)         += c-r4k.o cex-gen.o tlb-r8k.o
 obj-$(CONFIG_CPU_SB1)           += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
 obj-$(CONFIG_CPU_TX39XX)        += c-tx39.o tlb-r3k.o
 obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
 
 obj-$(CONFIG_IP22_CPU_SCACHE)   += sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)  += sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)   += sc-mips.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 6ec04daf4231..8557fb552863 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -106,7 +106,7 @@ static void octeon_flush_icache_all(void)
  * Called to flush all memory associated with a memory
  * context.
  *
  * @mm: Memory context to flush
  */
 static void octeon_flush_cache_mm(struct mm_struct *mm)
 {
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 031c4c2cdf2e..704dc735a59d 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -119,7 +119,7 @@ static void r3k_flush_icache_range(unsigned long start, unsigned long end)
         write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
 
         for (i = 0; i < size; i += 0x080) {
                 asm(    "sb\t$0, 0x000(%0)\n\t"
                         "sb\t$0, 0x004(%0)\n\t"
                         "sb\t$0, 0x008(%0)\n\t"
                         "sb\t$0, 0x00c(%0)\n\t"
@@ -176,7 +176,7 @@ static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
         write_c0_status((ST0_ISC|flags)&~ST0_IEC);
 
         for (i = 0; i < size; i += 0x080) {
                 asm(    "sb\t$0, 0x000(%0)\n\t"
                         "sb\t$0, 0x004(%0)\n\t"
                         "sb\t$0, 0x008(%0)\n\t"
                         "sb\t$0, 0x00c(%0)\n\t"
@@ -285,13 +285,13 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
         write_c0_status(flags&~ST0_IEC);
 
         /* Fill the TLB to avoid an exception with caches isolated. */
         asm(    "lw\t$0, 0x000(%0)\n\t"
                 "lw\t$0, 0x004(%0)\n\t"
                 : : "r" (addr) );
 
         write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
 
         asm(    "sb\t$0, 0x000(%0)\n\t"
                 "sb\t$0, 0x004(%0)\n\t"
                 : : "r" (addr) );
 
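
The sequence above is the R3000 isolated-cache flush idiom: with ST0_ISC set, a partial-word store invalidates the matching cache line instead of writing memory. A minimal standalone sketch of the idea (hypothetical helper, not the kernel's exact code; assumes caches are already isolated and a 4-byte stride for illustration):

        static void r3k_invalidate_range(unsigned long start, unsigned long size)
        {
                unsigned long a;

                /* With ST0_ISC set, sb writes no memory; it only invalidates
                 * the cache line matching the address. */
                for (a = start; a < start + size; a += 4)
                        asm volatile("sb\t$0, 0(%0)" : : "r" (a) : "memory");
        }
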
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 0f7d788e8810..d45f8e28b470 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -160,7 +160,7 @@ static void __cpuinit r4k_blast_dcache_setup(void)
160 "1:\n\t" \ 160 "1:\n\t" \
161 ) 161 )
162#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */ 162#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
163#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11) 163#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
164 164
165static inline void blast_r4600_v1_icache32(void) 165static inline void blast_r4600_v1_icache32(void)
166{ 166{
@@ -177,7 +177,7 @@ static inline void tx49_blast_icache32(void)
         unsigned long end = start + current_cpu_data.icache.waysize;
         unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
         unsigned long ws_end = current_cpu_data.icache.ways <<
                                current_cpu_data.icache.waybit;
         unsigned long ws, addr;
 
         CACHE32_UNROLL32_ALIGN2;
@@ -208,7 +208,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
         unsigned long end = start + PAGE_SIZE;
         unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
         unsigned long ws_end = current_cpu_data.icache.ways <<
                                current_cpu_data.icache.waybit;
         unsigned long ws, addr;
 
         CACHE32_UNROLL32_ALIGN2;
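
Both hunks above set up the same way/index walk that the blast helpers then unroll; the shape of the loop, using cache32_unroll32() and Index_Invalidate_I as defined elsewhere in c-r4k.c (a sketch; 0x400 is the 32-line by 32-byte span one unrolled block covers):

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr | ws, Index_Invalidate_I);
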
@@ -637,7 +637,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
          * for the cache instruction on MIPS processors and
          * some processors, among them the RM5200 and RM7000
          * QED processors will throw an address error for cache
          * hit ops with insufficient alignment. Solved by
          * aligning the address to cache line size.
          */
         blast_inv_scache_range(addr, addr + size);
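
The comment refers to hit-type cache ops taking an address error when the address is not line-aligned; a minimal sketch of the outward alignment it prescribes (standalone helpers with assumed names):

        static inline unsigned long line_down(unsigned long a, unsigned long lsz)
        {
                return a & ~(lsz - 1UL);        /* round down to line start */
        }

        static inline unsigned long line_up(unsigned long a, unsigned long lsz)
        {
                return (a + lsz - 1UL) & ~(lsz - 1UL);  /* round up */
        }

        /* e.g. invalidate [addr, addr + size) without address errors:
         * blast_inv_scache_range(line_down(addr, lsz),
         *                        line_up(addr + size, lsz));
         */
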
@@ -864,7 +864,7 @@ static void __cpuinit probe_pcache(void)
                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                 c->icache.ways = 1;
                 c->icache.waybit = 0;   /* doesn't matter */
 
                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -923,7 +923,7 @@ static void __cpuinit probe_pcache(void)
                 icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                 c->icache.ways = 1;
                 c->icache.waybit = 0;   /* doesn't matter */
 
                 dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -986,8 +986,8 @@ static void __cpuinit probe_pcache(void)
986 c->icache.ways = 1 + ((config1 >> 16) & 7); 986 c->icache.ways = 1 + ((config1 >> 16) & 7);
987 987
988 icache_size = c->icache.sets * 988 icache_size = c->icache.sets *
989 c->icache.ways * 989 c->icache.ways *
990 c->icache.linesz; 990 c->icache.linesz;
991 c->icache.waybit = __ffs(icache_size/c->icache.ways); 991 c->icache.waybit = __ffs(icache_size/c->icache.ways);
992 992
993 if (config & 0x8) /* VI bit */ 993 if (config & 0x8) /* VI bit */
@@ -1006,8 +1006,8 @@ static void __cpuinit probe_pcache(void)
1006 c->dcache.ways = 1 + ((config1 >> 7) & 7); 1006 c->dcache.ways = 1 + ((config1 >> 7) & 7);
1007 1007
1008 dcache_size = c->dcache.sets * 1008 dcache_size = c->dcache.sets *
1009 c->dcache.ways * 1009 c->dcache.ways *
1010 c->dcache.linesz; 1010 c->dcache.linesz;
1011 c->dcache.waybit = __ffs(dcache_size/c->dcache.ways); 1011 c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
1012 1012
1013 c->options |= MIPS_CPU_PREFETCH; 1013 c->options |= MIPS_CPU_PREFETCH;
@@ -1016,7 +1016,7 @@ static void __cpuinit probe_pcache(void)
 
         /*
          * Processor configuration sanity check for the R4000SC erratum
          * #5. With page sizes larger than 32kB there is no possibility
          * to get a VCE exception anymore so we don't care about this
          * misconfiguration. The case is rather theoretical anyway;
          * presumably no vendor is shipping his hardware in the "bad"
@@ -1088,7 +1088,7 @@ static void __cpuinit probe_pcache(void)
                 break;
         }
 
 #ifdef CONFIG_CPU_LOONGSON2
         /*
          * LOONGSON2 has 4 way icache, but when using indexed cache op,
          * one op will act on all 4 ways
@@ -1228,7 +1228,7 @@ static void __cpuinit setup_scache(void)
 #ifdef CONFIG_R5000_CPU_SCACHE
                 r5k_sc_init();
 #endif
                 return;
 
         case CPU_RM7000:
 #ifdef CONFIG_RM7000_CPU_SCACHE
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 87d23cada6d6..ba9da270289f 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -33,9 +33,9 @@ extern int r3k_have_wired_reg; /* in r3k-tlb.c */
 /* This sequence is required to ensure icache is disabled immediately */
 #define TX39_STOP_STREAMING() \
 __asm__ __volatile__( \
         ".set push\n\t" \
         ".set noreorder\n\t" \
         "b 1f\n\t" \
         "nop\n\t" \
         "1:\n\t" \
         ".set pop" \
@@ -361,7 +361,7 @@ void __cpuinit tx39_cache_init(void)
                 /* TX39/H core (writethru direct-map cache) */
                 __flush_cache_vmap   = tx39__flush_cache_vmap;
                 __flush_cache_vunmap = tx39__flush_cache_vunmap;
                 flush_cache_all      = tx39h_flush_icache_all;
                 __flush_cache_all    = tx39h_flush_icache_all;
                 flush_cache_mm       = (void *) tx39h_flush_icache_all;
                 flush_cache_range    = (void *) tx39h_flush_icache_all;
@@ -409,8 +409,8 @@ void __cpuinit tx39_cache_init(void)
                 _dma_cache_inv = tx39_dma_cache_inv;
 
                 shm_align_mask = max_t(unsigned long,
                                        (dcache_size / current_cpu_data.dcache.ways) - 1,
                                        PAGE_SIZE - 1);
 
                 break;
         }
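
The shm_align_mask assignment above takes the larger of (way size - 1) and (PAGE_SIZE - 1), so shared mappings agree in every address bit that indexes the cache; a worked example with assumed numbers:

        /* 16 KiB direct-mapped (1-way) D-cache, 4 KiB pages: */
        unsigned long dcache_size = 16 * 1024;
        unsigned long ways = 1;
        unsigned long mask = max_t(unsigned long,
                                   (dcache_size / ways) - 1,
                                   PAGE_SIZE - 1);
        /* mask == 0x3fff: mappings that agree in these low bits hit the
         * same cache lines, so no virtual aliasing can occur. */
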
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 3571090ba178..576add33bf5b 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -27,7 +27,7 @@
 
 /*
  * We'd like to dump the L2_ECC_TAG register on errors, but errata make
  * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
  */
 #undef DUMP_L2_ECC_TAG_ON_ERROR
 
@@ -48,7 +48,7 @@
 #define CP0_CERRI_EXTERNAL      (1 << 26)
 
 #define CP0_CERRI_IDX_VALID(c)  (!((c) & CP0_CERRI_EXTERNAL))
 #define CP0_CERRI_DATA          (CP0_CERRI_DATA_PARITY)
 
 #define CP0_CERRD_MULTIPLE      (1 << 31)
 #define CP0_CERRD_TAG_STATE     (1 << 30)
@@ -56,8 +56,8 @@
 #define CP0_CERRD_DATA_SBE      (1 << 28)
 #define CP0_CERRD_DATA_DBE      (1 << 27)
 #define CP0_CERRD_EXTERNAL      (1 << 26)
 #define CP0_CERRD_LOAD          (1 << 25)
 #define CP0_CERRD_STORE         (1 << 24)
 #define CP0_CERRD_FILLWB        (1 << 23)
 #define CP0_CERRD_COHERENCY     (1 << 22)
 #define CP0_CERRD_DUPTAG        (1 << 21)
@@ -69,10 +69,10 @@
         (CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
 #define CP0_CERRD_TYPES \
         (CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
 #define CP0_CERRD_DATA          (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
 
 static uint32_t extract_ic(unsigned short addr, int data);
 static uint32_t extract_dc(unsigned short addr, int data);
 
 static inline void breakout_errctl(unsigned int val)
 {
@@ -209,11 +209,11 @@ asmlinkage void sb1_cache_error(void)
209 "=r" (dpahi), "=r" (dpalo), "=r" (eepc)); 209 "=r" (dpahi), "=r" (dpalo), "=r" (eepc));
210 210
211 cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo; 211 cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
212 printk(" c0_errorepc == %08x\n", eepc); 212 printk(" c0_errorepc == %08x\n", eepc);
213 printk(" c0_errctl == %08x", errctl); 213 printk(" c0_errctl == %08x", errctl);
214 breakout_errctl(errctl); 214 breakout_errctl(errctl);
215 if (errctl & CP0_ERRCTL_ICACHE) { 215 if (errctl & CP0_ERRCTL_ICACHE) {
216 printk(" c0_cerr_i == %08x", cerr_i); 216 printk(" c0_cerr_i == %08x", cerr_i);
217 breakout_cerri(cerr_i); 217 breakout_cerri(cerr_i);
218 if (CP0_CERRI_IDX_VALID(cerr_i)) { 218 if (CP0_CERRI_IDX_VALID(cerr_i)) {
219 /* Check index of EPC, allowing for delay slot */ 219 /* Check index of EPC, allowing for delay slot */
@@ -229,7 +229,7 @@ asmlinkage void sb1_cache_error(void)
                 }
         }
         if (errctl & CP0_ERRCTL_DCACHE) {
                 printk(" c0_cerr_d == %08x", cerr_d);
                 breakout_cerrd(cerr_d);
                 if (CP0_CERRD_DPA_VALID(cerr_d)) {
                         printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
@@ -256,7 +256,7 @@ asmlinkage void sb1_cache_error(void)
         /*
          * Calling panic() when a fatal cache error occurs scrambles the
          * state of the system (and the cache), making it difficult to
          * investigate after the fact. However, if you just stall the CPU,
          * the other CPU may keep on running, which is typically very
          * undesirable.
          */
@@ -411,7 +411,7 @@ static uint32_t extract_ic(unsigned short addr, int data)
411 " dmfc0 $1, $28, 1\n\t" 411 " dmfc0 $1, $28, 1\n\t"
412 " dsrl32 %1, $1, 0 \n\t" 412 " dsrl32 %1, $1, 0 \n\t"
413 " sll %2, $1, 0 \n\t" 413 " sll %2, $1, 0 \n\t"
414 " .set pop \n" 414 " .set pop \n"
415 : "=r" (datahi), "=r" (insta), "=r" (instb) 415 : "=r" (datahi), "=r" (insta), "=r" (instb)
416 : "r" ((way << 13) | addr | (offset << 3))); 416 : "r" ((way << 13) | addr | (offset << 3)));
417 predecode = (datahi >> 8) & 0xff; 417 predecode = (datahi >> 8) & 0xff;
@@ -441,8 +441,8 @@ static uint8_t dc_ecc(uint64_t dword)
 {
         uint64_t t;
         uint32_t w;
         uint8_t  p;
         int      i;
 
         p = 0;
         for (i = 7; i >= 0; i--)
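
dc_ecc() accumulates per-group parity over the data dword; the basic 64-bit parity fold it builds on looks like this (a generic sketch, not the SB1's exact check-bit equations):

        static inline unsigned int parity64(uint64_t x)
        {
                x ^= x >> 32;
                x ^= x >> 16;
                x ^= x >> 8;
                x ^= x >> 4;
                x ^= x >> 2;
                x ^= x >> 1;
                return (unsigned int)x & 1;     /* 1 if an odd number of bits set */
        }
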
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
index e743622fd24d..45dff5cd4b8e 100644
--- a/arch/mips/mm/cex-gen.S
+++ b/arch/mips/mm/cex-gen.S
@@ -14,17 +14,17 @@
 #include <asm/stackframe.h>
 
 /*
  * Game over.  Go to the button.  Press gently.  Swear where allowed by
  * legislation.
  */
         LEAF(except_vec2_generic)
         .set    noreorder
         .set    noat
         .set    mips0
         /*
          * This is a very bad place to be.  Our cache error
          * detection has triggered.  If we have write-back data
          * in the cache, we may not be able to recover.  As a
          * first-order desperate measure, turn off KSEG0 cacheing.
          */
         mfc0    k0,CP0_CONFIG
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S
index 3db8553fcd34..9029092aa740 100644
--- a/arch/mips/mm/cex-oct.S
+++ b/arch/mips/mm/cex-oct.S
@@ -18,7 +18,7 @@
  */
         LEAF(except_vec2_octeon)
 
         .set    push
         .set    mips64r2
         .set    noreorder
         .set    noat
@@ -27,19 +27,19 @@
         /* due to an errata we need to read the COP0 CacheErr (Dcache)
          * before any cache/DRAM access */
 
         rdhwr   k0, $0          /* get core_id */
         PTR_LA  k1, cache_err_dcache
         sll     k0, k0, 3
         PTR_ADDU k1, k0, k1     /* k1 = &cache_err_dcache[core_id] */
 
         dmfc0   k0, CP0_CACHEERR, 1
         sd      k0, (k1)
         dmtc0   $0, CP0_CACHEERR, 1
 
         /* check whether this is a nested exception */
         mfc0    k1, CP0_STATUS
         andi    k1, k1, ST0_EXL
         beqz    k1, 1f
         nop
         j       cache_parity_error_octeon_non_recoverable
         nop
@@ -48,22 +48,22 @@
 1:      j       handle_cache_err
         nop
 
         .set    pop
         END(except_vec2_octeon)
 
         /* We need to jump to handle_cache_err so that the previous handler
          * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
          * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
         LEAF(handle_cache_err)
         .set    push
         .set    noreorder
         .set    noat
 
         SAVE_ALL
         KMODE
         jal     cache_parity_error_octeon_recoverable
         nop
         j       ret_from_exception
         nop
 
         .set    pop
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 89c412bc4b64..fe1d887e8d70 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -24,9 +24,9 @@
 #include <asm/cacheops.h>
 #include <asm/sibyte/board.h>
 
 #define C0_ERRCTL       $26     /* CP0: Error info */
 #define C0_CERR_I       $27     /* CP0: Icache error */
 #define C0_CERR_D       $27,1   /* CP0: Dcache error */
 
         /*
          * Based on SiByte sample software cache-err/cerr.S
@@ -88,7 +88,7 @@ attempt_recovery:
         /*
          * k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31.  Any
          * Dcache errors we can recover from will take more extensive
          * processing.  For now, they are considered "unrecoverable".
          * Note that 'DC' becoming set (outside of ERL mode) will
          * cause 'IC' to clear; so if there's an Icache error, we'll
          * only find out about it if we recover from this error and
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 3fab2046c8a4..f9ef83829a52 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -4,7 +4,7 @@
  * for more details.
  *
  * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
  * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
 
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index ddcec1e1a0cd..0fead53d1c26 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -52,7 +52,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 
 #ifdef CONFIG_KPROBES
         /*
          * This is to notify the fault handler of the kprobes. The
          * exception code is redundant as it is also carried in REGS,
          * but we pass it anyhow.
          */
@@ -216,7 +216,7 @@ bad_area_nosemaphore:
         }
 
 no_context:
         /* Are we prepared to handle this kernel fault? */
         if (fixup_exception(regs)) {
                 current->thread.cp0_baduaddr = address;
                 return;
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index dcfd573871c1..d4ea5c9c4a93 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -249,7 +249,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * @nr_pages: number of pages from start to pin
  * @write: whether pages will be written to
  * @pages: array that receives pointers to the pages pinned.
  *      Should be at least nr_pages long.
  *
  * Attempt to pin user pages in memory without taking mm->mmap_sem.
  * If not successful, it will fall back to taking the lock and
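
The kernel-doc above describes the fast-GUP entry point; typical usage of the era's interface looked like the following sketch (caller-chosen buffer size, error handling elided):

        struct page *pages[16];
        int got;

        /* pin up to 16 user pages for writing, avoiding mmap_sem when possible */
        got = get_user_pages_fast(start, 16, 1 /* write */, pages);
        if (got < 0)
                return got;     /* nothing was pinned */
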
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index be9acb2b959d..67929251286c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -66,7 +66,7 @@
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
  * when needed. This is necessary only on R4000 / R4400 SC and MC versions
  * where we have to avoid VCED / VECI exceptions for good performance at
  * any price. Since page is never written to after the initialization we
  * don't have to care about aliases on other CPUs.
@@ -380,7 +380,7 @@ void __init mem_init(void)
         high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
         totalram_pages += free_all_bootmem();
         totalram_pages -= setup_zero_pages();   /* Setup zeroed pages. */
 
         reservedpages = ram = 0;
         for (tmp = 0; tmp < max_low_pfn; tmp++)
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index cacfd31e8ec9..7f840bc08abf 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -22,7 +22,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
         phys_t end;
         unsigned long pfn;
         pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
                                    | __WRITEABLE | flags);
 
         address &= ~PMD_MASK;
         end = address + size;
@@ -185,7 +185,7 @@ void __iounmap(const volatile void __iomem *addr)
         if (!p)
                 printk(KERN_ERR "iounmap: bad address %p\n", addr);
 
         kfree(p);
 }
 
 EXPORT_SYMBOL(__ioremap);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 8e666c55f4d4..a29fba55b53e 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -271,7 +271,7 @@ void __cpuinit build_clear_page(void)
                 uasm_i_lui(&buf, AT, 0xa000);
 
         off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
                                 * cache_line_size : 0;
         while (off) {
                 build_clear_pref(&buf, -off);
                 off -= cache_line_size;
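
The off computation above caps the software-prefetch run-ahead at eight cache lines; with assumed values it works out as follows:

        int cache_line_size = 32;               /* assumed */
        int pref_bias_clear_store = 512;        /* assumed */
        int off = cache_line_size
                ? min(8, pref_bias_clear_store / cache_line_size) * cache_line_size
                : 0;    /* off == 256: prefetches start 8 lines ahead */
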
@@ -417,13 +417,13 @@ void __cpuinit build_copy_page(void)
                 uasm_i_lui(&buf, AT, 0xa000);
 
         off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
                                 cache_line_size : 0;
         while (off) {
                 build_copy_load_pref(&buf, -off);
                 off -= cache_line_size;
         }
         off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
                                 cache_line_size : 0;
         while (off) {
                 build_copy_store_pref(&buf, -off);
                 off -= cache_line_size;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index ee331bbd8f8a..e8adc0069d66 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -24,7 +24,7 @@ void pgd_init(unsigned long page)
         entry = (unsigned long)invalid_pmd_table;
 #endif
 
         p = (unsigned long *) page;
         end = p + PTRS_PER_PGD;
 
         do {
@@ -45,7 +45,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 {
         unsigned long *p, *end;
 
         p = (unsigned long *) addr;
         end = p + PTRS_PER_PMD;
 
         do {
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index 1eb708ef75ff..c6aaed934d53 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -159,7 +159,7 @@ static inline int __init indy_sc_probe(void)
 }
 
 /* XXX Check with wje if the Indy caches can differenciate between
    writeback + invalidate and just invalidate. */
 static struct bcache_ops indy_sc_ops = {
         .bc_enable = indy_sc_enable,
         .bc_disable = indy_sc_disable,
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8d90ff25b123..8bc67720e145 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -58,7 +58,7 @@ static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
 
 static void r5k_sc_enable(void)
 {
         unsigned long flags;
 
         local_irq_save(flags);
         set_c0_config(R5K_CONF_SE);
@@ -68,7 +68,7 @@ static void r5k_sc_enable(void)
 
 static void r5k_sc_disable(void)
 {
         unsigned long flags;
 
         local_irq_save(flags);
         blast_r5000_scache();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 2a7c9725b2a3..493131c81a29 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -424,7 +424,7 @@ void __cpuinit tlb_init(void)
                 write_c0_pagegrain(pg);
         }
 
         /* From this point on the ARC firmware is dead. */
         local_flush_tlb_all();
 
         /* Did I tell you that ARC SUCKS? */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 1c8ac49ec72c..36b9bd89c799 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -5,8 +5,8 @@
  *
  * Synthesize TLB refill handlers at runtime.
  *
  * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
  * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2008, 2009  Cavium Networks, Inc.
  * Copyright (C) 2011  MIPS Technologies, Inc.
@@ -212,7 +212,7 @@ static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
 /*
  * pgtable bits are assigned dynamically depending on processor feature
  * and statically based on kernel configuration.  This spits out the actual
  * values the kernel is using.  Required to make sense from disassembled
  * TLB exception handlers.
  */
 static void output_pgtable_bits_defines(void)
@@ -464,8 +464,8 @@ static u32 final_handler[64] __cpuinitdata;
  * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
  * 2. A timing hazard exists for the TLBP instruction.
  *
  *      stalling_instruction
  *      TLBP
  *
  * The JTLB is being read for the TLBP throughout the stall generated by the
  * previous instruction. This is not really correct as the stalling instruction
@@ -476,7 +476,7 @@ static u32 final_handler[64] __cpuinitdata;
  * The software work-around is to not allow the instruction preceding the TLBP
  * to stall - make it an NOP or some other instruction guaranteed not to stall.
  *
  * Errata 2 will not be fixed.  This errata is also on the R5000.
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
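
The work-around the comment describes is mechanical: guarantee that the instruction issued immediately before TLBP can never stall. In terms of the uasm emitters used throughout this file, that is simply (a sketch):

        static void emit_hazard_safe_tlbp(u32 **p)
        {
                uasm_i_nop(p);  /* filler guaranteed not to stall */
                uasm_i_tlbp(p);
        }
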
@@ -748,7 +748,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
          */
         small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
 
         /* We can clobber tmp. It isn't used after this. */
         if (!small_sequence)
                 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
@@ -830,12 +830,12 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                 /* Clear lower 23 bits of context. */
                 uasm_i_dins(p, ptr, 0, 0, 23);
 
                 /* 1 0 1 0 1 << 6  xkphys cached */
                 uasm_i_ori(p, ptr, ptr, 0x540);
                 uasm_i_drotr(p, ptr, ptr, 11);
         }
 #elif defined(CONFIG_SMP)
 # ifdef CONFIG_MIPS_MT_SMTC
         /*
          * SMTC uses TCBind value as "CPU" index
          */
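
The ori/drotr pair above is a bit trick: setting bits 10, 8 and 6 (0x540) and rotating right by 11 lands the pattern 1 0 1 0 1 in bits 63..59, turning the register into a cached-xkphys pointer. A worked check in plain C:

        uint64_t ptr = 0x540;                   /* bits 10, 8, 6 set */
        ptr = (ptr >> 11) | (ptr << (64 - 11)); /* drotr ptr, ptr, 11 */
        /* ptr == 0xa800000000000000ULL: bits 63..59 = 1 0 1 0 1,
         * i.e. an xkphys address with a cached CCA. */
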
@@ -955,7 +955,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
         /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
 #ifdef CONFIG_MIPS_MT_SMTC
         /*
          * SMTC uses TCBind value as "CPU" index
          */
@@ -965,7 +965,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 #else
         /*
          * smp_processor_id() << 3 is stored in CONTEXT.
          */
         uasm_i_mfc0(p, ptr, C0_CONTEXT);
         UASM_i_LA_mostly(p, tmp, pgdc);
         uasm_i_srl(p, ptr, ptr, 23);
@@ -1153,7 +1153,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 
         if (pgd_reg == -1) {
                 vmalloc_branch_delay_filled = 1;
                 /* 1 0 1 0 1 << 6  xkphys cached */
                 uasm_i_ori(p, ptr, ptr, 0x540);
                 uasm_i_drotr(p, ptr, ptr, 11);
         }
@@ -1171,9 +1171,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
         uasm_l_vmalloc_done(l, *p);
 
         /*
          * tmp                  ptr
          * fall-through case =  badvaddr  *pgd_current
          * vmalloc case      =  badvaddr  swapper_pg_dir
          */
 
         if (vmalloc_branch_delay_filled)
@@ -1212,7 +1212,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
         uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
         /*
          * The in the LWX case we don't want to do the load in the
          * delay slot.  It cannot issue in the same cycle and may be
          * speculative and unneeded.
          */
         if (use_lwx_insns())
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 39b891056227..942ff6c2eba2 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -7,7 +7,7 @@
  * support a subset of instructions, and does not try to hide pipeline
  * effects like branch delay slots.
  *
  * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
  * Copyright (C) 2005, 2007  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  */
@@ -119,30 +119,30 @@ static struct insn insn_table[] __uasminitdata = {
         { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
         { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
         { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
         { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
         { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
         { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
         { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
         { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
         { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
         { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
         { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
         { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
         { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
         { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
         { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
         { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
         { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
         { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
         { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
         { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
         { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
@@ -345,7 +345,7 @@ Ip_u2u1msbu3(op) \
 } \
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
 #define I_u2u1msbdu3(op) \
 Ip_u2u1msbu3(op) \
 { \
         build_insn(buf, insn##op, b, a, d-1, c); \