Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/fixmap.h   106
-rw-r--r--  include/asm-powerpc/highmem.h   41
2 files changed, 128 insertions, 19 deletions
diff --git a/include/asm-powerpc/fixmap.h b/include/asm-powerpc/fixmap.h
new file mode 100644
index 000000000000..8428b38a3d30
--- /dev/null
+++ b/include/asm-powerpc/fixmap.h
@@ -0,0 +1,106 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Copyright 2008 Freescale Semiconductor Inc.
+ *   Port to powerpc added by Kumar Gala
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+extern unsigned long FIXADDR_TOP;
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+	FIX_HOLE,
+#ifdef CONFIG_HIGHMEM
+	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
+	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+	/* FIX_PCIE_MCFG, */
+	__end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+			 phys_addr_t phys, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+		__set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+		__set_fixmap(idx, 0, __pgprot(0))
+
+#define __FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START	(FIXADDR_TOP - __FIXADDR_SIZE)
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	/*
+	 * this branch gets completely eliminated after inlining,
+	 * except when someone tries to use fixaddr indices in an
+	 * illegal way. (such as mixing up address types or using
+	 * out-of-range indices).
+	 *
+	 * If it doesn't get removed, the linker will complain
+	 * loudly with a reasonably clear error message..
+	 */
+	if (idx >= __end_of_fixed_addresses)
+		__this_fixmap_does_not_exist();
+
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif
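A quick aside on the arithmetic the new header relies on: __fix_to_virt() simply counts whole pages downward from FIXADDR_TOP, so each slot in enum fixed_addresses owns the page immediately below the previous one, and FIXADDR_START ends up __end_of_fixed_addresses pages below the top. The fragment below is a minimal user-space sketch of that layout, not kernel code: the PAGE_SHIFT of 12 (4 KiB pages) and the FIXADDR_TOP value of 0xfffff000 are illustrative assumptions, and the trimmed enum leaves out the CONFIG_HIGHMEM slots.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define FIXADDR_TOP	0xfffff000UL		/* illustrative value only */

/* trimmed copy of the enum above, without the CONFIG_HIGHMEM entries */
enum fixed_addresses { FIX_HOLE, FIX_KMAP_BEGIN, __end_of_fixed_addresses };

#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

int main(void)
{
	/* each index maps one page lower than the previous one */
	printf("FIX_HOLE       -> %#lx\n", __fix_to_virt(FIX_HOLE));
	printf("FIX_KMAP_BEGIN -> %#lx\n", __fix_to_virt(FIX_KMAP_BEGIN));
	/* FIXADDR_START sits __end_of_fixed_addresses pages below the top */
	printf("FIXADDR_START  -> %#lx\n",
	       FIXADDR_TOP - ((unsigned long)__end_of_fixed_addresses << PAGE_SHIFT));
	return 0;
}

With these assumed values the program prints 0xfffff000, 0xffffe000 and 0xffffd000, which is exactly the "allocate from the end of virtual memory backwards" scheme described in the comment in fixmap.h.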
diff --git a/include/asm-powerpc/highmem.h b/include/asm-powerpc/highmem.h
index f7b21ee302b4..5d99b6489d56 100644
--- a/include/asm-powerpc/highmem.h
+++ b/include/asm-powerpc/highmem.h
@@ -27,9 +27,7 @@
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
-
-/* undef for production */
-#define HIGHMEM_DEBUG 1
+#include <asm/fixmap.h>
 
 extern pte_t *kmap_pte;
 extern pgprot_t kmap_prot;
@@ -40,14 +38,12 @@ extern pte_t *pkmap_page_table;
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
-#define PKMAP_BASE	CONFIG_HIGHMEM_START
 #define LAST_PKMAP	(1 << PTE_SHIFT)
 #define LAST_PKMAP_MASK	(LAST_PKMAP-1)
+#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
 #define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-#define KMAP_FIX_BEGIN	(PKMAP_BASE + 0x00400000UL)
-
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
 
@@ -73,7 +69,7 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 {
 	unsigned int idx;
 	unsigned long vaddr;
@@ -84,34 +80,39 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 		return page_address(page);
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = KMAP_FIX_BEGIN + idx * PAGE_SIZE;
-#ifdef HIGHMEM_DEBUG
-	BUG_ON(!pte_none(*(kmap_pte+idx)));
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	set_pte_at(&init_mm, vaddr, kmap_pte+idx, mk_pte(page, kmap_prot));
+	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
 	flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
 }
 
+static inline void *kmap_atomic(struct page *page, enum km_type type)
+{
+	return kmap_atomic_prot(page, type, kmap_prot);
+}
+
 static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 {
-#ifdef HIGHMEM_DEBUG
+#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
+	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE);
+	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
 	/*
 	 * force other mappings to Oops if they'll try to access
 	 * this pte without first remap it
 	 */
-	pte_clear(&init_mm, vaddr, kmap_pte+idx);
+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
 	flush_tlb_page(NULL, vaddr);
 #endif
 	pagefault_enable();
@@ -120,12 +121,14 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 static inline struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long) ptr;
+	pte_t *pte;
 
-	if (vaddr < KMAP_FIX_BEGIN)
+	if (vaddr < FIXADDR_START)
 		return virt_to_page(ptr);
 
-	idx = (vaddr - KMAP_FIX_BEGIN) >> PAGE_SHIFT;
-	return pte_page(kmap_pte[idx]);
+	idx = virt_to_fix(vaddr);
+	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+	return pte_page(*pte);
 }
 
 #define flush_cache_kmaps()	flush_cache_all()
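To make the effect of the highmem.h changes concrete, here is a hedged usage sketch of the kmap_atomic_prot()/kunmap_atomic() pair as it looks after this patch. The helper name zero_highmem_page_example is hypothetical and not part of the patch; it assumes a kernel of this vintage where KM_USER0 comes from asm/kmap_types.h and kmap_prot is the default protection declared in this header.

#include <linux/highmem.h>	/* kmap_atomic_prot(), kunmap_atomic() */
#include <linux/string.h>	/* memset() */

/* hypothetical helper, for illustration only */
static void zero_highmem_page_example(struct page *page)
{
	/* map the page into this CPU's KM_USER0 fixmap slot */
	void *vaddr = kmap_atomic_prot(page, KM_USER0, kmap_prot);

	memset(vaddr, 0, PAGE_SIZE);

	/* tear the temporary mapping down again */
	kunmap_atomic(vaddr, KM_USER0);
}

Callers that are happy with the default protection keep using kmap_atomic(), which after this patch is just a thin wrapper passing kmap_prot to kmap_atomic_prot(); the temporary mapping now lands at __fix_to_virt(FIX_KMAP_BEGIN + idx) inside the fixmap region instead of the old KMAP_FIX_BEGIN area above PKMAP_BASE.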