Diffstat (limited to 'arch/mips/mm')

-rw-r--r--  arch/mips/mm/c-r4k.c      | 22
-rw-r--r--  arch/mips/mm/c-sb1.c      | 22
-rw-r--r--  arch/mips/mm/fault.c      |  4
-rw-r--r--  arch/mips/mm/init.c       | 42
-rw-r--r--  arch/mips/mm/pgtable-64.c |  3
-rw-r--r--  arch/mips/mm/tlbex.c      | 55

6 files changed, 113 insertions, 35 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index cc895dad71d2..df04a315d830 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -323,7 +323,6 @@ static void __init r4k_blast_scache_setup(void)
 static inline void local_r4k_flush_cache_all(void * args)
 {
 	r4k_blast_dcache();
-	r4k_blast_icache();
 }
 
 static void r4k_flush_cache_all(void)
@@ -359,21 +358,19 @@ static void r4k___flush_cache_all(void)
 static inline void local_r4k_flush_cache_range(void * args)
 {
 	struct vm_area_struct *vma = args;
-	int exec;
 
 	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
 		return;
 
-	exec = vma->vm_flags & VM_EXEC;
-	if (cpu_has_dc_aliases || exec)
-		r4k_blast_dcache();
-	if (exec)
-		r4k_blast_icache();
+	r4k_blast_dcache();
 }
 
 static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
+	if (!cpu_has_dc_aliases)
+		return;
+
 	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
 }
 
@@ -384,18 +381,21 @@ static inline void local_r4k_flush_cache_mm(void * args)
 	if (!cpu_context(smp_processor_id(), mm))
 		return;
 
-	r4k_blast_dcache();
-	r4k_blast_icache();
-
 	/*
 	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
 	 * only flush the primary caches but R10000 and R12000 behave sane ...
+	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
+	 * caches, so we can bail out early.
 	 */
 	if (current_cpu_data.cputype == CPU_R4000SC ||
 	    current_cpu_data.cputype == CPU_R4000MC ||
 	    current_cpu_data.cputype == CPU_R4400SC ||
-	    current_cpu_data.cputype == CPU_R4400MC)
+	    current_cpu_data.cputype == CPU_R4400MC) {
 		r4k_blast_scache();
+		return;
+	}
+
+	r4k_blast_dcache();
 }
 
 static void r4k_flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index d0ddb4a768a5..3a8afd47feaa 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -19,6 +19,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 #include <linux/init.h>
+#include <linux/hardirq.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -242,6 +243,25 @@ void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsign
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
 
+#ifdef CONFIG_SMP
+static void sb1_flush_cache_data_page_ipi(void *info)
+{
+	unsigned long start = (unsigned long)info;
+
+	__sb1_writeback_inv_dcache_range(start, start + PAGE_SIZE);
+}
+
+static void sb1_flush_cache_data_page(unsigned long addr)
+{
+	if (in_atomic())
+		__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
+	else
+		on_each_cpu(sb1_flush_cache_data_page_ipi, (void *) addr, 1, 1);
+}
+#else
+void sb1_flush_cache_data_page(unsigned long)
+	__attribute__((alias("local_sb1_flush_cache_data_page")));
+#endif
 
 /*
  * Invalidate all caches on this CPU
@@ -481,7 +501,7 @@ void sb1_cache_init(void)
 
 	flush_cache_sigtramp = sb1_flush_cache_sigtramp;
 	local_flush_data_cache_page = (void *) sb1_nop;
-	flush_data_cache_page = (void *) sb1_nop;
+	flush_data_cache_page = sb1_flush_cache_data_page;
 
 	/* Full flush */
 	__flush_cache_all = sb1___flush_cache_all;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 8423d8590779..6f90e7ef66ac 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -60,6 +60,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	 */
 	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
 		goto vmalloc_fault;
+#ifdef MODULE_START
+	if (unlikely(address >= MODULE_START && address < MODULE_END))
+		goto vmalloc_fault;
+#endif
 
 	/*
 	 * If we're in an interrupt or have no user
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2de4d3c367a2..9e29ba9205f0 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -90,9 +90,9 @@ unsigned long setup_zero_pages(void)
 	if (!empty_zero_page)
 		panic("Oh boy, that early out of memory?");
 
-	page = virt_to_page(empty_zero_page);
+	page = virt_to_page((void *)empty_zero_page);
 	split_page(page, order);
-	while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
 		SetPageReserved(page);
 		page++;
 	}
@@ -443,15 +443,18 @@ void __init mem_init(void)
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
+static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr;
+	unsigned long pfn;
 
-	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
-		free_page(addr);
+	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+		struct page *page = pfn_to_page(pfn);
+		void *addr = phys_to_virt(PFN_PHYS(pfn));
+
+		ClearPageReserved(page);
+		init_page_count(page);
+		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+		__free_page(page);
 		totalram_pages++;
 	}
 	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
@@ -460,12 +463,9 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-#ifdef CONFIG_64BIT
-	/* Switch from KSEG0 to XKPHYS addresses */
-	start = (unsigned long)phys_to_virt(CPHYSADDR(start));
-	end = (unsigned long)phys_to_virt(CPHYSADDR(end));
-#endif
-	free_init_pages("initrd memory", start, end);
+	free_init_pages("initrd memory",
+			virt_to_phys((void *)start),
+			virt_to_phys((void *)end));
 }
 #endif
 
@@ -473,17 +473,13 @@ extern unsigned long prom_free_prom_memory(void);
 
 void free_initmem(void)
 {
-	unsigned long start, end, freed;
+	unsigned long freed;
 
 	freed = prom_free_prom_memory();
 	if (freed)
 		printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed);
 
-	start = (unsigned long)(&__init_begin);
-	end = (unsigned long)(&__init_end);
-#ifdef CONFIG_64BIT
-	start = PAGE_OFFSET | CPHYSADDR(start);
-	end = PAGE_OFFSET | CPHYSADDR(end);
-#endif
-	free_init_pages("unused kernel memory", start, end);
+	free_init_pages("unused kernel memory",
+			__pa_symbol(&__init_begin),
+			__pa_symbol(&__init_end));
 }
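
Note: the free_init_pages() rework above switches from walking virtual addresses to walking page frame numbers, which is why its callers now hand it physical addresses (virt_to_phys() / __pa_symbol()). A minimal standalone sketch of the rounding this relies on, assuming the conventional PFN_UP/PFN_DOWN definitions (those macros are not part of this diff) and a hypothetical 4 KB page size:

/*
 * Sketch only, not kernel code: PFN_UP rounds a physical address up to
 * the next whole page frame, PFN_DOWN rounds it down, so a partial page
 * at either end of [begin, end) is never freed by the loop above.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long begin = 0x1000800UL, end = 0x1003800UL;

	/*
	 * Prints the half-open PFN range [0x1001, 0x1003): frames 0x1001
	 * and 0x1002 would be freed, the partial frames 0x1000 and 0x1003
	 * are skipped.
	 */
	printf("pfn %#lx .. %#lx\n", PFN_UP(begin), PFN_DOWN(end));
	return 0;
}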
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 8d600d307d5d..c46eb651bf09 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -58,6 +58,9 @@ void __init pagetable_init(void)
 
 	/* Initialize the entire pgd. */
 	pgd_init((unsigned long)swapper_pg_dir);
+#ifdef MODULE_START
+	pgd_init((unsigned long)module_pg_dir);
+#endif
 	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
 
 	pgd_base = swapper_pg_dir;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index fec318a1c8c5..492c518e7ba5 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -423,6 +423,9 @@ enum label_id {
 	label_invalid,
 	label_second_part,
 	label_leave,
+#ifdef MODULE_START
+	label_module_alloc,
+#endif
 	label_vmalloc,
 	label_vmalloc_done,
 	label_tlbw_hazard,
@@ -455,6 +458,9 @@ static __init void build_label(struct label **lab, u32 *addr,
 
 L_LA(_second_part)
 L_LA(_leave)
+#ifdef MODULE_START
+L_LA(_module_alloc)
+#endif
 L_LA(_vmalloc)
 L_LA(_vmalloc_done)
 L_LA(_tlbw_hazard)
@@ -686,6 +692,13 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
 	i_bgezl(p, reg, 0);
 }
 
+static void __init __attribute__((unused))
+il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
+{
+	r_mips_pc16(r, *p, l);
+	i_bgez(p, reg, 0);
+}
+
 /* The only general purpose registers allowed in TLB handlers. */
 #define K0 26
 #define K1 27
@@ -970,7 +983,11 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	i_dmfc0(p, tmp, C0_BADVADDR);
+#ifdef MODULE_START
+	il_bltz(p, r, tmp, label_module_alloc);
+#else
 	il_bltz(p, r, tmp, label_vmalloc);
+#endif
 	/* No i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_SMP
@@ -1023,8 +1040,46 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
 {
 	long swpd = (long)swapper_pg_dir;
 
+#ifdef MODULE_START
+	long modd = (long)module_pg_dir;
+
+	l_module_alloc(l, *p);
+	/*
+	 * Assumption:
+	 * VMALLOC_START >= 0xc000000000000000UL
+	 * MODULE_START >= 0xe000000000000000UL
+	 */
+	i_SLL(p, ptr, bvaddr, 2);
+	il_bgez(p, r, ptr, label_vmalloc);
+
+	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
+		i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
+	} else {
+		/* unlikely configuration */
+		i_nop(p); /* delay slot */
+		i_LA(p, ptr, MODULE_START);
+	}
+	i_dsubu(p, bvaddr, bvaddr, ptr);
+
+	if (in_compat_space_p(modd) && !rel_lo(modd)) {
+		il_b(p, r, label_vmalloc_done);
+		i_lui(p, ptr, rel_hi(modd));
+	} else {
+		i_LA_mostly(p, ptr, modd);
+		il_b(p, r, label_vmalloc_done);
+		i_daddiu(p, ptr, ptr, rel_lo(modd));
+	}
+
+	l_vmalloc(l, *p);
+	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
+	    MODULE_START << 32 == VMALLOC_START)
+		i_dsll32(p, ptr, ptr, 0);	/* typical case */
+	else
+		i_LA(p, ptr, VMALLOC_START);
+#else
 	l_vmalloc(l, *p);
 	i_LA(p, ptr, VMALLOC_START);
+#endif
 	i_dsubu(p, bvaddr, bvaddr, ptr);
 
 	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
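
Note: the build_get_pgd_vmalloc64() hunk leans on the layout spelled out in its "Assumption" comment. With vmalloc space starting at 0xc000000000000000 and module space at or above 0xe000000000000000, both regions share the top two address bits, so shifting the faulting address left by two makes the original bit 61 the new sign bit; the emitted bgez then sends vmalloc addresses to label_vmalloc and falls through for module addresses. A standalone sketch of that classification in ordinary C (not the kernel's micro-assembler, and assuming i_SLL expands to a 64-bit shift in this handler):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: mimics the "shift left by 2, branch on sign" test
 * emitted above, under the stated layout assumptions.
 */
static const char *classify(uint64_t badvaddr)
{
	/* After << 2, the sign bit is the original bit 61. */
	int64_t shifted = (int64_t)(badvaddr << 2);

	return shifted >= 0 ? "vmalloc mapping" : "module mapping";
}

int main(void)
{
	printf("%#llx -> %s\n", 0xc000000000000000ULL,
	       classify(0xc000000000000000ULL));	/* vmalloc */
	printf("%#llx -> %s\n", 0xe000000000000000ULL,
	       classify(0xe000000000000000ULL));	/* module */
	return 0;
}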