diff options
| -rw-r--r-- | arch/powerpc/include/asm/highmem.h | 57 | ||||
| -rw-r--r-- | arch/powerpc/mm/Makefile | 1 | ||||
| -rw-r--r-- | arch/powerpc/mm/highmem.c | 77 |
3 files changed, 82 insertions, 53 deletions
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index 684a73f4324f..a74c4ee6c020 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h | |||
| @@ -22,9 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
| 24 | 24 | ||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
| 27 | #include <linux/highmem.h> | ||
| 28 | #include <asm/kmap_types.h> | 26 | #include <asm/kmap_types.h> |
| 29 | #include <asm/tlbflush.h> | 27 | #include <asm/tlbflush.h> |
| 30 | #include <asm/page.h> | 28 | #include <asm/page.h> |
| @@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table; | |||
| 62 | 60 | ||
| 63 | extern void *kmap_high(struct page *page); | 61 | extern void *kmap_high(struct page *page); |
| 64 | extern void kunmap_high(struct page *page); | 62 | extern void kunmap_high(struct page *page); |
| 63 | extern void *kmap_atomic_prot(struct page *page, enum km_type type, | ||
| 64 | pgprot_t prot); | ||
| 65 | extern void kunmap_atomic(void *kvaddr, enum km_type type); | ||
| 65 | 66 | ||
| 66 | static inline void *kmap(struct page *page) | 67 | static inline void *kmap(struct page *page) |
| 67 | { | 68 | { |
| @@ -79,62 +80,11 @@ static inline void kunmap(struct page *page) | |||
| 79 | kunmap_high(page); | 80 | kunmap_high(page); |
| 80 | } | 81 | } |
| 81 | 82 | ||
| 82 | /* | ||
| 83 | * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap | ||
| 84 | * gives a more generic (and caching) interface. But kmap_atomic can | ||
| 85 | * be used in IRQ contexts, so in some (very limited) cases we need | ||
| 86 | * it. | ||
| 87 | */ | ||
| 88 | static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | ||
| 89 | { | ||
| 90 | unsigned int idx; | ||
| 91 | unsigned long vaddr; | ||
| 92 | |||
| 93 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | ||
| 94 | pagefault_disable(); | ||
| 95 | if (!PageHighMem(page)) | ||
| 96 | return page_address(page); | ||
| 97 | |||
| 98 | debug_kmap_atomic(type); | ||
| 99 | idx = type + KM_TYPE_NR*smp_processor_id(); | ||
| 100 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
| 101 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 102 | BUG_ON(!pte_none(*(kmap_pte-idx))); | ||
| 103 | #endif | ||
| 104 | __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); | ||
| 105 | local_flush_tlb_page(NULL, vaddr); | ||
| 106 | |||
| 107 | return (void*) vaddr; | ||
| 108 | } | ||
| 109 | |||
| 110 | static inline void *kmap_atomic(struct page *page, enum km_type type) | 83 | static inline void *kmap_atomic(struct page *page, enum km_type type) |
| 111 | { | 84 | { |
| 112 | return kmap_atomic_prot(page, type, kmap_prot); | 85 | return kmap_atomic_prot(page, type, kmap_prot); |
| 113 | } | 86 | } |
| 114 | 87 | ||
| 115 | static inline void kunmap_atomic(void *kvaddr, enum km_type type) | ||
| 116 | { | ||
| 117 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 118 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | ||
| 119 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | ||
| 120 | |||
| 121 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { | ||
| 122 | pagefault_enable(); | ||
| 123 | return; | ||
| 124 | } | ||
| 125 | |||
| 126 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | ||
| 127 | |||
| 128 | /* | ||
| 129 | * force other mappings to Oops if they'll try to access | ||
| 130 | * this pte without first remapping it | ||
| 131 | */ | ||
| 132 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | ||
| 133 | local_flush_tlb_page(NULL, vaddr); | ||
| 134 | #endif | ||
| 135 | pagefault_enable(); | ||
| 136 | } | ||
| 137 | |||
| 138 | static inline struct page *kmap_atomic_to_page(void *ptr) | 88 | static inline struct page *kmap_atomic_to_page(void *ptr) |
| 139 | { | 89 | { |
| 140 | unsigned long idx, vaddr = (unsigned long) ptr; | 90 | unsigned long idx, vaddr = (unsigned long) ptr; |
| @@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr) | |||
| 148 | return pte_page(*pte); | 98 | return pte_page(*pte); |
| 149 | } | 99 | } |
| 150 | 100 | ||
| 101 | |||
| 151 | #define flush_cache_kmaps() flush_cache_all() | 102 | #define flush_cache_kmaps() flush_cache_all() |
| 152 | 103 | ||
| 153 | #endif /* __KERNEL__ */ | 104 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 2d2192e48de7..3e68363405b7 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile | |||
| @@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_MM_SLICES) += slice.o | |||
| 30 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 30 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
| 31 | obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o | 31 | obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o |
| 32 | obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o | 32 | obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o |
| 33 | obj-$(CONFIG_HIGHMEM) += highmem.o | ||
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c new file mode 100644 index 000000000000..c2186c74c85a --- /dev/null +++ b/arch/powerpc/mm/highmem.c | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | /* | ||
| 2 | * highmem.c: virtual kernel memory mappings for high memory | ||
| 3 | * | ||
| 4 | * PowerPC version, stolen from the i386 version. | ||
| 5 | * | ||
| 6 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
| 7 | * are not addressable by direct kernel virtual addresses. | ||
| 8 | * | ||
| 9 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
| 10 | * Gerhard.Wichert@pdb.siemens.de | ||
| 11 | * | ||
| 12 | * | ||
| 13 | * Redesigned the x86 32-bit VM architecture to deal with | ||
| 14 | * up to 16 Terabyte physical memory. With current x86 CPUs | ||
| 15 | * we now support up to 64 Gigabytes physical RAM. | ||
| 16 | * | ||
| 17 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | ||
| 18 | * | ||
| 19 | * Reworked for PowerPC by various contributors. Moved from | ||
| 20 | * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <linux/highmem.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | |||
| 26 | /* | ||
| 27 | * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap | ||
| 28 | * gives a more generic (and caching) interface. But kmap_atomic can | ||
| 29 | * be used in IRQ contexts, so in some (very limited) cases we need | ||
| 30 | * it. | ||
| 31 | */ | ||
| 32 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | ||
| 33 | { | ||
| 34 | unsigned int idx; | ||
| 35 | unsigned long vaddr; | ||
| 36 | |||
| 37 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | ||
| 38 | pagefault_disable(); | ||
| 39 | if (!PageHighMem(page)) | ||
| 40 | return page_address(page); | ||
| 41 | |||
| 42 | debug_kmap_atomic(type); | ||
| 43 | idx = type + KM_TYPE_NR*smp_processor_id(); | ||
| 44 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
| 45 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 46 | BUG_ON(!pte_none(*(kmap_pte-idx))); | ||
| 47 | #endif | ||
| 48 | __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); | ||
| 49 | local_flush_tlb_page(NULL, vaddr); | ||
| 50 | |||
| 51 | return (void*) vaddr; | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL(kmap_atomic_prot); | ||
| 54 | |||
| 55 | void kunmap_atomic(void *kvaddr, enum km_type type) | ||
| 56 | { | ||
| 57 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 58 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | ||
| 59 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | ||
| 60 | |||
| 61 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { | ||
| 62 | pagefault_enable(); | ||
| 63 | return; | ||
| 64 | } | ||
| 65 | |||
| 66 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | ||
| 67 | |||
| 68 | /* | ||
| 69 | * force other mappings to Oops if they'll try to access | ||
| 70 | * this pte without first remapping it | ||
| 71 | */ | ||
| 72 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | ||
| 73 | local_flush_tlb_page(NULL, vaddr); | ||
| 74 | #endif | ||
| 75 | pagefault_enable(); | ||
| 76 | } | ||
| 77 | EXPORT_SYMBOL(kunmap_atomic); | ||
