author     James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-22 13:06:44 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-22 13:06:44 -0500
commit     0bd2af46839ad6262d25714a6ec0365db9d6b98f (patch)
tree       dcced72d230d69fd0c5816ac6dd03ab84799a93e /arch/mips/mm
parent     e138a5d2356729b8752e88520cc1525fae9794ac (diff)
parent     f26b90440cd74c78fe10c9bd5160809704a9627c (diff)
Merge ../scsi-rc-fixes-2.6
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-sb1.c        20
-rw-r--r--  arch/mips/mm/init.c        167
-rw-r--r--  arch/mips/mm/ioremap.c       2
-rw-r--r--  arch/mips/mm/pg-r4k.c       30
-rw-r--r--  arch/mips/mm/pgtable-32.c    7
-rw-r--r--  arch/mips/mm/pgtable-64.c   11
-rw-r--r--  arch/mips/mm/tlbex.c        13
7 files changed, 228 insertions(+), 22 deletions(-)
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 5537558f19f7..d0ddb4a768a5 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -49,6 +49,15 @@ static unsigned short dcache_sets;
 static unsigned int icache_range_cutoff;
 static unsigned int dcache_range_cutoff;
 
+static inline void sb1_on_each_cpu(void (*func) (void *info), void *info,
+                                   int retry, int wait)
+{
+	preempt_disable();
+	smp_call_function(func, info, retry, wait);
+	func(info);
+	preempt_enable();
+}
+
 /*
  * The dcache is fully coherent to the system, with one
  * big caveat: the instruction stream. In other words,
@@ -226,7 +235,7 @@ static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
 	args.vma = vma;
 	args.addr = addr;
 	args.pfn = pfn;
-	on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
+	sb1_on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
 }
 #else
 void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
@@ -249,7 +258,7 @@ void sb1___flush_cache_all_ipi(void *ignored)
 
 static void sb1___flush_cache_all(void)
 {
-	on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
+	sb1_on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
 }
 #else
 void sb1___flush_cache_all(void)
@@ -299,7 +308,7 @@ void sb1_flush_icache_range(unsigned long start, unsigned long end)
 
 	args.start = start;
 	args.end = end;
-	on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
+	sb1_on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
 }
 #else
 void sb1_flush_icache_range(unsigned long start, unsigned long end)
@@ -326,7 +335,7 @@ static void sb1_flush_cache_sigtramp_ipi(void *info)
 
 static void sb1_flush_cache_sigtramp(unsigned long addr)
 {
-	on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
+	sb1_on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
 }
 #else
 void sb1_flush_cache_sigtramp(unsigned long addr)
@@ -444,7 +453,6 @@ static __init void probe_cache_sizes(void)
 void sb1_cache_init(void)
 {
 	extern char except_vec2_sb1;
-	extern char handle_vec2_sb1;
 
 	/* Special cache error handler for SB1 */
 	set_uncached_handler (0x100, &except_vec2_sb1, 0x80);
@@ -497,5 +505,5 @@ void sb1_cache_init(void)
 	:
 	: "memory");
 
-	flush_cache_all();
+	local_sb1___flush_cache_all();
 }
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 88b72c9a8495..2de4d3c367a2 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -30,11 +30,34 @@
 #include <asm/cachectl.h>
 #include <asm/cpu.h>
 #include <asm/dma.h>
+#include <asm/kmap_types.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
+
+/* Atomicity and interruptability */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+	{ \
+	unsigned int mvpflags; \
+	local_irq_save(flags);\
+	mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+	evpe(mvpflags); \
+	local_irq_restore(flags); \
+	}
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -80,13 +103,142 @@ unsigned long setup_zero_pages(void)
 	return 1UL << order;
 }
 
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
+/*
+ * These are almost like kmap_atomic / kunmap_atmic except they take an
+ * additional address argument as the hint.
+ */
 
 #define kmap_get_fixmap_pte(vaddr)					\
 	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
 
+#ifdef CONFIG_MIPS_MT_SMTC
+static pte_t *kmap_coherent_pte;
+static void __init kmap_coherent_init(void)
+{
+	unsigned long vaddr;
+
+	/* cache the first coherent kmap pte */
+	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+}
+#else
+static inline void kmap_coherent_init(void) {}
+#endif
+
+static inline void *kmap_coherent(struct page *page, unsigned long addr)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr, flags, entrylo;
+	unsigned long old_ctx;
+	pte_t pte;
+	int tlbidx;
+
+	inc_preempt_count();
+	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+#ifdef CONFIG_MIPS_MT_SMTC
+	idx += FIX_N_COLOURS * smp_processor_id();
+#endif
+	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+	pte = mk_pte(page, PAGE_KERNEL);
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+	entrylo = pte.pte_high;
+#else
+	entrylo = pte_val(pte) >> 6;
+#endif
+
+	ENTER_CRITICAL(flags);
+	old_ctx = read_c0_entryhi();
+	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
+	write_c0_entrylo0(entrylo);
+	write_c0_entrylo1(entrylo);
+#ifdef CONFIG_MIPS_MT_SMTC
+	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+	/* preload TLB instead of local_flush_tlb_one() */
+	mtc0_tlbw_hazard();
+	tlb_probe();
+	tlb_probe_hazard();
+	tlbidx = read_c0_index();
+	mtc0_tlbw_hazard();
+	if (tlbidx < 0)
+		tlb_write_random();
+	else
+		tlb_write_indexed();
+#else
+	tlbidx = read_c0_wired();
+	write_c0_wired(tlbidx + 1);
+	write_c0_index(tlbidx);
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+#endif
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	EXIT_CRITICAL(flags);
+
+	return (void*) vaddr;
+}
+
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+static inline void kunmap_coherent(struct page *page)
+{
+#ifndef CONFIG_MIPS_MT_SMTC
+	unsigned int wired;
+	unsigned long flags, old_ctx;
+
+	ENTER_CRITICAL(flags);
+	old_ctx = read_c0_entryhi();
+	wired = read_c0_wired() - 1;
+	write_c0_wired(wired);
+	write_c0_index(wired);
+	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	EXIT_CRITICAL(flags);
+#endif
+	dec_preempt_count();
+	preempt_check_resched();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len)
+{
+	if (cpu_has_dc_aliases) {
+		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(vto, src, len);
+		kunmap_coherent(page);
+	} else
+		memcpy(dst, src, len);
+	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+		flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+EXPORT_SYMBOL(copy_to_user_page);
+
+void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len)
+{
+	if (cpu_has_dc_aliases) {
+		void *vfrom =
+			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(dst, vfrom, len);
+		kunmap_coherent(page);
+	} else
+		memcpy(dst, src, len);
+}
+
+EXPORT_SYMBOL(copy_from_user_page);
+
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
 static void __init kmap_init(void)
 {
 	unsigned long kmap_vstart;
@@ -97,11 +249,12 @@ static void __init kmap_init(void)
 
 	kmap_prot = PAGE_KERNEL;
 }
+#endif /* CONFIG_HIGHMEM */
 
-#ifdef CONFIG_32BIT
 void __init fixrange_init(unsigned long start, unsigned long end,
 	pgd_t *pgd_base)
 {
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -122,7 +275,7 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-					set_pmd(pmd, __pmd(pte));
+					set_pmd(pmd, __pmd((unsigned long)pte));
 					if (pte != pte_offset_kernel(pmd, 0))
 						BUG();
 				}
@@ -132,9 +285,8 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 		}
 		j = 0;
 	}
+#endif
 }
-#endif /* CONFIG_32BIT */
-#endif /* CONFIG_HIGHMEM */
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 extern void pagetable_init(void);
@@ -175,6 +327,7 @@ void __init paging_init(void)
 #ifdef CONFIG_HIGHMEM
 	kmap_init();
 #endif
+	kmap_coherent_init();
 
 	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	low = max_low_pfn;
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 3101d1db5592..cea7d0ea36e4 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -176,7 +176,7 @@ void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
 
 #define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
 
-void __iounmap(volatile void __iomem *addr)
+void __iounmap(const volatile void __iomem *addr)
 {
 	struct vm_struct *p;
 
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index b7c749232ffe..d41fc5885e87 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -270,6 +270,20 @@ static inline void build_addiu_a2_a0(unsigned long offset)
 	emit_instruction(mi);
 }
 
+static inline void build_addiu_a2(unsigned long offset)
+{
+	union mips_instruction mi;
+
+	BUG_ON(offset > 0x7fff);
+
+	mi.i_format.opcode     = cpu_has_64bit_gp_regs ? daddiu_op : addiu_op;
+	mi.i_format.rs         = 6;		/* $a2 */
+	mi.i_format.rt         = 6;		/* $a2 */
+	mi.i_format.simmediate = offset;
+
+	emit_instruction(mi);
+}
+
 static inline void build_addiu_a1(unsigned long offset)
 {
 	union mips_instruction mi;
@@ -333,6 +347,7 @@ static inline void build_jr_ra(void)
 void __init build_clear_page(void)
 {
 	unsigned int loop_start;
+	unsigned long off;
 
 	epc = (unsigned int *) &clear_page_array;
 	instruction_pending = 0;
@@ -369,7 +384,12 @@ void __init build_clear_page(void)
 		}
 	}
 
-	build_addiu_a2_a0(PAGE_SIZE - (cpu_has_prefetch ? pref_offset_clear : 0));
+	off = PAGE_SIZE - (cpu_has_prefetch ? pref_offset_clear : 0);
+	if (off > 0x7fff) {
+		build_addiu_a2_a0(off >> 1);
+		build_addiu_a2(off >> 1);
+	} else
+		build_addiu_a2_a0(off);
 
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
 		build_insn_word(0x3c01a000);	/* lui $at, 0xa000 */
@@ -420,12 +440,18 @@ dest = label();
 void __init build_copy_page(void)
 {
 	unsigned int loop_start;
+	unsigned long off;
 
 	epc = (unsigned int *) &copy_page_array;
 	store_offset = load_offset = 0;
 	instruction_pending = 0;
 
-	build_addiu_a2_a0(PAGE_SIZE - (cpu_has_prefetch ? pref_offset_copy : 0));
+	off = PAGE_SIZE - (cpu_has_prefetch ? pref_offset_copy : 0);
+	if (off > 0x7fff) {
+		build_addiu_a2_a0(off >> 1);
+		build_addiu_a2(off >> 1);
+	} else
+		build_addiu_a2_a0(off);
 
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
 		build_insn_word(0x3c01a000);	/* lui $at, 0xa000 */
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 4bdaa05f485b..4a61e624b0ec 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -31,9 +31,10 @@ void pgd_init(unsigned long page)
 
 void __init pagetable_init(void)
 {
-#ifdef CONFIG_HIGHMEM
 	unsigned long vaddr;
-	pgd_t *pgd, *pgd_base;
+	pgd_t *pgd_base;
+#ifdef CONFIG_HIGHMEM
+	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -44,7 +45,6 @@ void __init pagetable_init(void)
 	pgd_init((unsigned long)swapper_pg_dir
 		 + sizeof(pgd_t) * USER_PTRS_PER_PGD);
 
-#ifdef CONFIG_HIGHMEM
 	pgd_base = swapper_pg_dir;
 
 	/*
@@ -53,6 +53,7 @@ void __init pagetable_init(void)
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	fixrange_init(vaddr, 0, pgd_base);
 
+#ifdef CONFIG_HIGHMEM
 	/*
 	 * Permanent kmaps:
 	 */
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 44b5e97fff65..8d600d307d5d 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -8,6 +8,7 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <asm/fixmap.h>
 #include <asm/pgtable.h>
 
 void pgd_init(unsigned long page)
@@ -52,7 +53,17 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 
 void __init pagetable_init(void)
 {
+	unsigned long vaddr;
+	pgd_t *pgd_base;
+
 	/* Initialize the entire pgd. */
 	pgd_init((unsigned long)swapper_pg_dir);
 	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
+
+	pgd_base = swapper_pg_dir;
+	/*
+	 * Fixed mappings:
+	 */
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+	fixrange_init(vaddr, 0, pgd_base);
 }
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 6f8b25cfa6f0..fec318a1c8c5 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -102,7 +102,7 @@ enum opcode {
 	insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
 	insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
 	insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
-	insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
+	insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
 	insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
 	insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
 	insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
@@ -145,6 +145,7 @@ static __initdata struct insn insn_table[] = {
 	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
 	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
 	{ insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE },
+	{ insn_dsrl32, M(spec_op,0,0,0,0,dsrl32_op), RT | RD | RE },
 	{ insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD },
 	{ insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 },
 	{ insn_j, M(j_op,0,0,0,0,0), JIMM },
@@ -385,6 +386,7 @@ I_u2u1u3(_dsll);
 I_u2u1u3(_dsll32);
 I_u2u1u3(_dsra);
 I_u2u1u3(_dsrl);
+I_u2u1u3(_dsrl32);
 I_u3u1u2(_dsubu);
 I_0(_eret);
 I_u1(_j);
@@ -996,7 +998,12 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
 #endif
 
 	l_vmalloc_done(l, *p);
-	i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); /* get pgd offset in bytes */
+
+	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
+		i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
+	else
+		i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+
 	i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 	i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
@@ -1073,7 +1080,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
 static __init void build_adjust_context(u32 **p, unsigned int ctx)
 {
-	unsigned int shift = 4 - (PTE_T_LOG2 + 1);
+	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
 
 	switch (current_cpu_data.cputype) {