Diffstat (limited to 'arch/sh/mm/pg-mmu.c')
 -rw-r--r--  arch/sh/mm/pg-mmu.c  136
 1 file changed, 136 insertions, 0 deletions
diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c
new file mode 100644
index 000000000000..356d2cdcb209
--- /dev/null
+++ b/arch/sh/mm/pg-mmu.c
@@ -0,0 +1,136 @@
/*
 * arch/sh/mm/pg-mmu.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;

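/*
 * Cache the fixmap PTE that backs the coherent kmap window once at boot,
 * so kmap_coherent() does not need to walk the page tables on every map.
 */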
void __init kmap_coherent_init(void)
{
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
#endif
}

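/*
 * Map @page at a fixmap slot whose cache colour matches the user address
 * @addr, so the returned kernel mapping and the user mapping hit the same
 * D-cache lines.  Preemption stays disabled until kunmap_coherent().
 */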
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags;
        pte_t pte;

        inc_preempt_count();

        idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);

        local_irq_save(flags);
        flush_tlb_one(get_asid(), vaddr);
        local_irq_restore(flags);

        update_mmu_cache(NULL, vaddr, pte);

        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

        return (void *)vaddr;
}

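/*
 * No explicit teardown is needed: the fixmap PTE stays in place and the
 * next kmap_coherent() that reuses the slot flushes its TLB entry, so
 * only the preempt count is unwound here.  The argument is unused.
 */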
static inline void kunmap_coherent(struct page *page)
{
        dec_preempt_count();
        preempt_check_resched();
}

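/*
 * Write into a page that may also be mapped in user space.  When the page
 * is mapped and its D-cache state is clean, write through a coherent
 * kernel mapping; otherwise copy through the regular kernel address and
 * set PG_dcache_dirty.  Executable mappings additionally get the cache
 * flushed for the target page.
 */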
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

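/*
 * Read from a page that may also be mapped in user space, using the same
 * coherent-mapping strategy as copy_to_user_page() above.
 */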
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }
}

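/*
 * Copy one user page to another (e.g. on copy-on-write), reading the
 * source through a coherent mapping when possible and writing the
 * destination back when its kernel address aliases the user address.
 */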
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            !test_bit(PG_dcache_dirty, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_wback_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is copied on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

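/*
 * Zero a user page through a temporary kernel mapping, writing the data
 * back when the kernel address aliases the user address.
 */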
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_wback_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
