author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2016-04-29 09:25:44 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>			2016-05-01 04:32:42 -0400
commit		eee24b5aafe87f3591990528eae2ad22e2c1d50c (patch)
tree		724368e072025f9f2957bf63923497a3bc05fd1a
parent		50de596de8be6de75401a2190b90a822e8a1bab2 (diff)
powerpc/mm: Move hash and no hash code to separate files
This patch reduces the number of #ifdefs in the C code and will also help
when adding the radix changes later. This patch is code movement only; there
is no functional change.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[mpe: Propagate copyrights and update GPL text]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
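In miniature, the transformation looks like this (an editor's sketch with made-up names, not code from the patch): the variant choice moves out of the preprocessor and into the build system, so each translation unit compiles free of #ifdefs.

/* Before: one file carries both MMU variants, selected by the preprocessor */
#ifdef CONFIG_VARIANT_HASH
int create_mapping(unsigned long start) { /* hash-specific */ return 0; }
#else
int create_mapping(unsigned long start) { /* nohash-specific */ return 0; }
#endif

/*
 * After: hash.c and nohash.c each define create_mapping(), and kbuild
 * compiles exactly one of them, e.g.:
 *
 *   obj-$(CONFIG_VARIANT_HASH)   += hash.o
 *   obj-$(CONFIG_VARIANT_NOHASH) += nohash.o
 *
 * Callers declare the function once and call it unconditionally.
 */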
-rw-r--r--	arch/powerpc/mm/Makefile		  3
-rw-r--r--	arch/powerpc/mm/init_64.c		 74
-rw-r--r--	arch/powerpc/mm/pgtable-book3e.c	121
-rw-r--r--	arch/powerpc/mm/pgtable-hash64.c	 94
-rw-r--r--	arch/powerpc/mm/pgtable_64.c		 83
5 files changed, 222 insertions(+), 153 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index adfee3f1aeb9..ef778997daa9 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -13,7 +13,8 @@ obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
 				   tlb_nohash_low.o
 obj-$(CONFIG_PPC_BOOK3E)	+= tlb_low_$(CONFIG_WORD_SIZE)e.o
 hash64-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o slb_low.o slb.o $(hash64-y)
+obj-$(CONFIG_PPC_BOOK3E_64)	+= pgtable-book3e.o
+obj-$(CONFIG_PPC_STD_MMU_64)	+= pgtable-hash64.o hash_utils_64.o slb_low.o slb.o $(hash64-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o hash_low_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= tlb_hash$(CONFIG_WORD_SIZE).o \
 				   mmu_context_hash$(CONFIG_WORD_SIZE).o
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ba655666186d..8d1daf7d9785 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -189,75 +189,6 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
 	return 0;
 }
 
-/* On hash-based CPUs, the vmemmap is bolted in the hash table.
- *
- * On Book3E CPUs, the vmemmap is currently mapped in the top half of
- * the vmalloc space using normal page tables, though the size of
- * pages encoded in the PTEs can be different
- */
-
-#ifdef CONFIG_PPC_BOOK3E
-static int __meminit vmemmap_create_mapping(unsigned long start,
-					    unsigned long page_size,
-					    unsigned long phys)
-{
-	/* Create a PTE encoding without page size */
-	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
-		_PAGE_KERNEL_RW;
-
-	/* PTEs only contain page size encodings up to 32M */
-	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);
-
-	/* Encode the size in the PTE */
-	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;
-
-	/* For each PTE for that area, map things. Note that we don't
-	 * increment phys because all PTEs are of the large size and
-	 * thus must have the low bits clear
-	 */
-	for (i = 0; i < page_size; i += PAGE_SIZE)
-		BUG_ON(map_kernel_page(start + i, phys, flags));
-
-	return 0;
-}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-static void vmemmap_remove_mapping(unsigned long start,
-				   unsigned long page_size)
-{
-}
-#endif
-#else /* CONFIG_PPC_BOOK3E */
-static int __meminit vmemmap_create_mapping(unsigned long start,
-					    unsigned long page_size,
-					    unsigned long phys)
-{
-	int rc = htab_bolt_mapping(start, start + page_size, phys,
-				   pgprot_val(PAGE_KERNEL),
-				   mmu_vmemmap_psize, mmu_kernel_ssize);
-	if (rc < 0) {
-		int rc2 = htab_remove_mapping(start, start + page_size,
-					      mmu_vmemmap_psize,
-					      mmu_kernel_ssize);
-		BUG_ON(rc2 && (rc2 != -ENOENT));
-	}
-	return rc;
-}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-static void vmemmap_remove_mapping(unsigned long start,
-				   unsigned long page_size)
-{
-	int rc = htab_remove_mapping(start, start + page_size,
-				     mmu_vmemmap_psize,
-				     mmu_kernel_ssize);
-	BUG_ON((rc < 0) && (rc != -ENOENT));
-	WARN_ON(rc == -ENOENT);
-}
-#endif
-
-#endif /* CONFIG_PPC_BOOK3E */
-
 struct vmemmap_backing *vmemmap_list;
 static struct vmemmap_backing *next;
 static int num_left;
@@ -309,6 +240,9 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
 	vmemmap_list = vmem_back;
 }
 
+extern int __meminit vmemmap_create_mapping(unsigned long start,
+					    unsigned long page_size,
+					    unsigned long phys);
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
@@ -347,6 +281,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+extern void vmemmap_remove_mapping(unsigned long start,
+				   unsigned long page_size);
 static unsigned long vmemmap_list_free(unsigned long start)
 {
 	struct vmemmap_backing *vmem_back, *vmem_back_prev;
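The two extern declarations added above are the whole contract between the generic vmemmap code and the MMU backends: each of the two new files below supplies one implementation, and the linker pulls in whichever object kbuild compiled. If the declarations lived in a shared header instead (this patch does not add one; the shape below is hypothetical), the contract would read:

/* Hypothetical shared header - not part of this patch */
int vmemmap_create_mapping(unsigned long start, unsigned long page_size,
			   unsigned long phys);
void vmemmap_remove_mapping(unsigned long start, unsigned long page_size);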
diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c
new file mode 100644
index 000000000000..b323735a8360
--- /dev/null
+++ b/arch/powerpc/mm/pgtable-book3e.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2005, Paul Mackerras, IBM Corporation.
+ * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
+ * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/dma.h>
+
+#include "mmu_decl.h"
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/*
+ * On Book3E CPUs, the vmemmap is currently mapped in the top half of
+ * the vmalloc space using normal page tables, though the size of
+ * pages encoded in the PTEs can be different
+ */
+int __meminit vmemmap_create_mapping(unsigned long start,
+				     unsigned long page_size,
+				     unsigned long phys)
+{
+	/* Create a PTE encoding without page size */
+	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
+		_PAGE_KERNEL_RW;
+
+	/* PTEs only contain page size encodings up to 32M */
+	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);
+
+	/* Encode the size in the PTE */
+	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;
+
+	/* For each PTE for that area, map things. Note that we don't
+	 * increment phys because all PTEs are of the large size and
+	 * thus must have the low bits clear
+	 */
+	for (i = 0; i < page_size; i += PAGE_SIZE)
+		BUG_ON(map_kernel_page(start + i, phys, flags));
+
+	return 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void vmemmap_remove_mapping(unsigned long start,
+			    unsigned long page_size)
+{
+}
+#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+static __ref void *early_alloc_pgtable(unsigned long size)
+{
+	void *pt;
+
+	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
+	memset(pt, 0, size);
+
+	return pt;
+}
+
+/*
+ * map_kernel_page currently only called by __ioremap
+ * map_kernel_page adds an entry to the ioremap page table
+ * and adds an entry to the HPT, possibly bolting it
+ */
+int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+{
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	if (slab_is_available()) {
+		pgdp = pgd_offset_k(ea);
+		pudp = pud_alloc(&init_mm, pgdp, ea);
+		if (!pudp)
+			return -ENOMEM;
+		pmdp = pmd_alloc(&init_mm, pudp, ea);
+		if (!pmdp)
+			return -ENOMEM;
+		ptep = pte_alloc_kernel(pmdp, ea);
+		if (!ptep)
+			return -ENOMEM;
+		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+						       __pgprot(flags)));
+	} else {
+		pgdp = pgd_offset_k(ea);
+#ifndef __PAGETABLE_PUD_FOLDED
+		if (pgd_none(*pgdp)) {
+			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
+			BUG_ON(pudp == NULL);
+			pgd_populate(&init_mm, pgdp, pudp);
+		}
+#endif /* !__PAGETABLE_PUD_FOLDED */
+		pudp = pud_offset(pgdp, ea);
+		if (pud_none(*pudp)) {
+			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
+			BUG_ON(pmdp == NULL);
+			pud_populate(&init_mm, pudp, pmdp);
+		}
+		pmdp = pmd_offset(pudp, ea);
+		if (!pmd_present(*pmdp)) {
+			ptep = early_alloc_pgtable(PAGE_SIZE);
+			BUG_ON(ptep == NULL);
+			pmd_populate_kernel(&init_mm, pmdp, ptep);
+		}
+		ptep = pte_offset_kernel(pmdp, ea);
+		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+						       __pgprot(flags)));
+	}
+
+	smp_wmb();
+	return 0;
+}
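The late (slab_is_available()) path of map_kernel_page() above delegates to the generic page-table allocators, while the early path walks pgd -> pud -> pmd -> pte by hand and allocates any missing level from memblock. Below is a stand-alone userspace model of that allocate-if-missing walk (an editor's sketch: the table geometry, the calloc-backed tables, and the fake PTE format are all assumptions, not the kernel's).

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES    512			/* entries per table at every level */
#define LEVEL_BITS 9			/* log2(ENTRIES) */
#define PAGE_SHIFT 12

/* One "page table": zero-filled, like early_alloc_pgtable() in the patch. */
static void **alloc_table(void)
{
	return calloc(ENTRIES, sizeof(void *));
}

/* Index into the table at a given level; level 0 is the top (pgd). */
static unsigned long level_index(unsigned long ea, int level)
{
	int shift = PAGE_SHIFT + LEVEL_BITS * (3 - level);

	return (ea >> shift) & (ENTRIES - 1);
}

/* Walk pgd -> pud -> pmd, allocating missing tables, then set the "PTE". */
static int map_page(void **pgd, unsigned long ea, unsigned long pte)
{
	void **table = pgd;
	int level;

	for (level = 0; level < 3; level++) {
		unsigned long i = level_index(ea, level);

		if (!table[i]) {
			table[i] = alloc_table();
			if (!table[i])
				return -1;	/* -ENOMEM in the kernel */
		}
		table = table[i];
	}
	((unsigned long *)table)[level_index(ea, 3)] = pte;
	return 0;
}

int main(void)
{
	void **pgd = alloc_table();
	unsigned long ea = 0xc0000000UL;

	if (!pgd)
		return 1;
	/* Fake PTE: frame number shifted up, plus a "present" bit in bit 0. */
	if (map_page(pgd, ea, ((ea >> PAGE_SHIFT) << 1) | 1))
		return 1;
	printf("mapped ea 0x%lx\n", ea);
	return 0;
}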
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
new file mode 100644
index 000000000000..f405a67c807a
--- /dev/null
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2005, Paul Mackerras, IBM Corporation.
+ * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
+ * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/*
+ * On hash-based CPUs, the vmemmap is bolted in the hash table.
+ *
+ */
+int __meminit vmemmap_create_mapping(unsigned long start,
+				     unsigned long page_size,
+				     unsigned long phys)
+{
+	int rc = htab_bolt_mapping(start, start + page_size, phys,
+				   pgprot_val(PAGE_KERNEL),
+				   mmu_vmemmap_psize, mmu_kernel_ssize);
+	if (rc < 0) {
+		int rc2 = htab_remove_mapping(start, start + page_size,
+					      mmu_vmemmap_psize,
+					      mmu_kernel_ssize);
+		BUG_ON(rc2 && (rc2 != -ENOENT));
+	}
+	return rc;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void vmemmap_remove_mapping(unsigned long start,
+			    unsigned long page_size)
+{
+	int rc = htab_remove_mapping(start, start + page_size,
+				     mmu_vmemmap_psize,
+				     mmu_kernel_ssize);
+	BUG_ON((rc < 0) && (rc != -ENOENT));
+	WARN_ON(rc == -ENOENT);
+}
+#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+/*
+ * map_kernel_page currently only called by __ioremap
+ * map_kernel_page adds an entry to the ioremap page table
+ * and adds an entry to the HPT, possibly bolting it
+ */
+int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+{
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	if (slab_is_available()) {
+		pgdp = pgd_offset_k(ea);
+		pudp = pud_alloc(&init_mm, pgdp, ea);
+		if (!pudp)
+			return -ENOMEM;
+		pmdp = pmd_alloc(&init_mm, pudp, ea);
+		if (!pmdp)
+			return -ENOMEM;
+		ptep = pte_alloc_kernel(pmdp, ea);
+		if (!ptep)
+			return -ENOMEM;
+		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+						       __pgprot(flags)));
+	} else {
+		/*
+		 * If the mm subsystem is not fully up, we cannot create a
+		 * linux page table entry for this mapping. Simply bolt an
+		 * entry in the hardware page table.
+		 *
+		 */
+		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+				      mmu_io_psize, mmu_kernel_ssize)) {
+			printk(KERN_ERR "Failed to do bolted mapping IO "
+			       "memory at %016lx !\n", pa);
+			return -ENOMEM;
+		}
+	}
+
+	smp_wmb();
+	return 0;
+}
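One detail worth noticing in the hash variant above: vmemmap_create_mapping() tries the bolt, and on failure immediately removes whatever partial mapping may have been left behind, treating -ENOENT ("nothing to remove") as a harmless rollback result. The same try/rollback shape in a stand-alone form (an editor's sketch with stubbed-out mapping functions; the names and the injected failure are invented):

#include <errno.h>
#include <stdio.h>

/* Stub: pretend the insert failed partway through. */
static int insert_mapping(unsigned long start, unsigned long end)
{
	return -ENOMEM;
}

/* Stub: nothing was actually inserted, so there is nothing to remove. */
static int remove_mapping(unsigned long start, unsigned long end)
{
	return -ENOENT;
}

static int create_mapping(unsigned long start, unsigned long size)
{
	int rc = insert_mapping(start, start + size);

	if (rc < 0) {
		/*
		 * Roll back a possibly partial insert. -ENOENT only means
		 * there was nothing to undo; any other rollback error is a
		 * real problem (the kernel code BUG()s on it).
		 */
		int rc2 = remove_mapping(start, start + size);

		if (rc2 && rc2 != -ENOENT)
			fprintf(stderr, "rollback failed: %d\n", rc2);
	}
	return rc;
}

int main(void)
{
	printf("create_mapping: %d\n", create_mapping(0x10000, 0x1000));
	return 0;
}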
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 8f3b2942fba8..1bfb112e1453 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -78,89 +78,6 @@ struct patb_entry *partition_tb;
 #endif
 unsigned long ioremap_bot = IOREMAP_BASE;
 
-#ifdef CONFIG_PPC_MMU_NOHASH
-static __ref void *early_alloc_pgtable(unsigned long size)
-{
-	void *pt;
-
-	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
-	memset(pt, 0, size);
-
-	return pt;
-}
-#endif /* CONFIG_PPC_MMU_NOHASH */
-
-/*
- * map_kernel_page currently only called by __ioremap
- * map_kernel_page adds an entry to the ioremap page table
- * and adds an entry to the HPT, possibly bolting it
- */
-int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-
-	if (slab_is_available()) {
-		pgdp = pgd_offset_k(ea);
-		pudp = pud_alloc(&init_mm, pgdp, ea);
-		if (!pudp)
-			return -ENOMEM;
-		pmdp = pmd_alloc(&init_mm, pudp, ea);
-		if (!pmdp)
-			return -ENOMEM;
-		ptep = pte_alloc_kernel(pmdp, ea);
-		if (!ptep)
-			return -ENOMEM;
-		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-						       __pgprot(flags)));
-	} else {
-#ifdef CONFIG_PPC_MMU_NOHASH
-		pgdp = pgd_offset_k(ea);
-#ifdef PUD_TABLE_SIZE
-		if (pgd_none(*pgdp)) {
-			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
-			BUG_ON(pudp == NULL);
-			pgd_populate(&init_mm, pgdp, pudp);
-		}
-#endif /* PUD_TABLE_SIZE */
-		pudp = pud_offset(pgdp, ea);
-		if (pud_none(*pudp)) {
-			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
-			BUG_ON(pmdp == NULL);
-			pud_populate(&init_mm, pudp, pmdp);
-		}
-		pmdp = pmd_offset(pudp, ea);
-		if (!pmd_present(*pmdp)) {
-			ptep = early_alloc_pgtable(PAGE_SIZE);
-			BUG_ON(ptep == NULL);
-			pmd_populate_kernel(&init_mm, pmdp, ptep);
-		}
-		ptep = pte_offset_kernel(pmdp, ea);
-		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-						       __pgprot(flags)));
-#else /* CONFIG_PPC_MMU_NOHASH */
-		/*
-		 * If the mm subsystem is not fully up, we cannot create a
-		 * linux page table entry for this mapping. Simply bolt an
-		 * entry in the hardware page table.
-		 *
-		 */
-		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
-				      mmu_io_psize, mmu_kernel_ssize)) {
-			printk(KERN_ERR "Failed to do bolted mapping IO "
-			       "memory at %016lx !\n", pa);
-			return -ENOMEM;
-		}
-#endif /* !CONFIG_PPC_MMU_NOHASH */
-	}
-
-	smp_wmb();
-	return 0;
-}
-
-
 /**
  * __ioremap_at - Low level function to establish the page tables
  * for an IO mapping