author     Christophe Leroy <christophe.leroy@c-s.fr>   2019-08-20 10:07:17 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>        2019-08-26 23:03:35 -0400
commit     f381d5711f091facd8847a54a2377cc0d1519df2 (patch)
tree       c187a3aa9f280cafa727be14d92406682ba972a0
parent     7cd9b317b630683b0c8eb2dfcfb046003ad6b97b (diff)
powerpc/mm: Move ioremap functions out of pgtable_32/64.c
Create ioremap_32.c and ioremap_64.c and move the respective ioremap
functions out of pgtable_32.c and pgtable_64.c.

While at it, fix a few comments and change a printk() to pr_warn().
Also fix a few over-split lines.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b5c8b02ccefd4ede64c61b53cf64fb5dacb35740.1566309263.git.christophe.leroy@c-s.fr
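
For readers unfamiliar with the interface being relocated, here is a minimal driver-style usage sketch of ioremap_wt() and iounmap(), whose 32-bit implementations now live in ioremap_32.c. The device, physical base address and window size below are hypothetical and not taken from this patch:

/* Hypothetical example: map a 1 MB write-through MMIO window, then tear it down. */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_FB_PHYS 0xc0000000UL    /* hypothetical framebuffer base */
#define EXAMPLE_FB_SIZE 0x00100000UL    /* 1 MB window */

static void __iomem *example_fb;

static int example_map(void)
{
    /* ioremap_wt() returns NULL on failure, e.g. when asked to remap normal RAM. */
    example_fb = ioremap_wt(EXAMPLE_FB_PHYS, EXAMPLE_FB_SIZE);
    if (!example_fb)
        return -ENOMEM;

    writel(0, example_fb);    /* access through the write-through mapping */
    return 0;
}

static void example_unmap(void)
{
    /* On 32-bit, iounmap() is a no-op for BAT (block) mapped regions. */
    iounmap(example_fb);
}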
-rw-r--r--   arch/powerpc/mm/Makefile     |   2
-rw-r--r--   arch/powerpc/mm/ioremap_32.c | 104
-rw-r--r--   arch/powerpc/mm/ioremap_64.c | 123
-rw-r--r--   arch/powerpc/mm/pgtable_32.c |  99
-rw-r--r--   arch/powerpc/mm/pgtable_64.c | 128
5 files changed, 229 insertions(+), 227 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 29c682fe9144..5e147986400d 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -7,7 +7,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 
 obj-y           := fault.o mem.o pgtable.o mmap.o \
                    init_$(BITS).o pgtable_$(BITS).o \
-                   pgtable-frag.o ioremap.o \
+                   pgtable-frag.o ioremap.o ioremap_$(BITS).o \
                    init-common.o mmu_context.o drmem.o
 obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/
 obj-$(CONFIG_PPC_BOOK3S_32)  += book3s32/
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
new file mode 100644
index 000000000000..fb43ba71aa54
--- /dev/null
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <mm/mmu_decl.h>
+
+void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size)
+{
+    pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
+
+    return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wt);
+
+void __iomem *
+__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
+{
+    unsigned long v, i;
+    phys_addr_t p;
+    int err;
+
+    /*
+     * Choose an address to map it to.
+     * Once the vmalloc system is running, we use it.
+     * Before then, we use space going down from IOREMAP_TOP
+     * (ioremap_bot records where we're up to).
+     */
+    p = addr & PAGE_MASK;
+    size = PAGE_ALIGN(addr + size) - p;
+
+    /*
+     * If the address lies within the first 16 MB, assume it's in ISA
+     * memory space
+     */
+    if (p < 16 * 1024 * 1024)
+        p += _ISA_MEM_BASE;
+
+#ifndef CONFIG_CRASH_DUMP
+    /*
+     * Don't allow anybody to remap normal RAM that we're using.
+     * mem_init() sets high_memory so only do the check after that.
+     */
+    if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
+        page_is_ram(__phys_to_pfn(p))) {
+        pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
+            (unsigned long long)p, __builtin_return_address(0));
+        return NULL;
+    }
+#endif
+
+    if (size == 0)
+        return NULL;
+
+    /*
+     * Is it already mapped? Perhaps overlapped by a previous
+     * mapping.
+     */
+    v = p_block_mapped(p);
+    if (v)
+        goto out;
+
+    if (slab_is_available()) {
+        struct vm_struct *area;
+        area = get_vm_area_caller(size, VM_IOREMAP, caller);
+        if (area == 0)
+            return NULL;
+        area->phys_addr = p;
+        v = (unsigned long)area->addr;
+    } else {
+        v = (ioremap_bot -= size);
+    }
+
+    /*
+     * Should check if it is a candidate for a BAT mapping
+     */
+
+    err = 0;
+    for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+        err = map_kernel_page(v + i, p + i, prot);
+    if (err) {
+        if (slab_is_available())
+            vunmap((void *)v);
+        return NULL;
+    }
+
+out:
+    return (void __iomem *)(v + ((unsigned long)addr & ~PAGE_MASK));
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+    /*
+     * If mapped by BATs then there is nothing to do.
+     * Calling vfree() generates a benign warning.
+     */
+    if (v_block_mapped((unsigned long)addr))
+        return;
+
+    if (addr > high_memory && (unsigned long)addr < ioremap_bot)
+        vunmap((void *)(PAGE_MASK & (unsigned long)addr));
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
new file mode 100644
index 000000000000..57f3b096143c
--- /dev/null
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
+                         pgprot_t prot, int nid)
+{
+    unsigned long i;
+
+    for (i = 0; i < size; i += PAGE_SIZE) {
+        int err = map_kernel_page(ea + i, pa + i, prot);
+        if (err) {
+            if (slab_is_available())
+                unmap_kernel_range(ea, size);
+            else
+                WARN_ON_ONCE(1); /* Should clean up */
+            return err;
+        }
+    }
+
+    return 0;
+}
+
+/**
+ * Low level function to establish the page tables for an IO mapping
+ */
+void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
+{
+    /* We don't support the 4K PFN hack with ioremap */
+    if (pgprot_val(prot) & H_PAGE_4K_PFN)
+        return NULL;
+
+    if ((ea + size) >= (void *)IOREMAP_END) {
+        pr_warn("Outside the supported range\n");
+        return NULL;
+    }
+
+    WARN_ON(pa & ~PAGE_MASK);
+    WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+    WARN_ON(size & ~PAGE_MASK);
+
+    if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
+        return NULL;
+
+    return (void __iomem *)ea;
+}
+EXPORT_SYMBOL(__ioremap_at);
+
+/**
+ * Low level function to tear down the page tables for an IO mapping. This is
+ * used for mappings that are manipulated manually, like partial unmapping of
+ * PCI IOs or ISA space.
+ */
+void __iounmap_at(void *ea, unsigned long size)
+{
+    WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+    WARN_ON(size & ~PAGE_MASK);
+
+    unmap_kernel_range((unsigned long)ea, size);
+}
+EXPORT_SYMBOL(__iounmap_at);
+
+void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
+                               pgprot_t prot, void *caller)
+{
+    phys_addr_t paligned;
+    void __iomem *ret;
+
+    /*
+     * Choose an address to map it to. Once the vmalloc system is running,
+     * we use it. Before that, we map using addresses going up from
+     * ioremap_bot. vmalloc will use the addresses from IOREMAP_BASE
+     * through ioremap_bot.
+     */
+    paligned = addr & PAGE_MASK;
+    size = PAGE_ALIGN(addr + size) - paligned;
+
+    if (size == 0 || paligned == 0)
+        return NULL;
+
+    if (slab_is_available()) {
+        struct vm_struct *area;
+
+        area = __get_vm_area_caller(size, VM_IOREMAP, ioremap_bot,
+                                    IOREMAP_END, caller);
+        if (area == NULL)
+            return NULL;
+
+        area->phys_addr = paligned;
+        ret = __ioremap_at(paligned, area->addr, size, prot);
+    } else {
+        ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
+        if (ret)
+            ioremap_bot += size;
+    }
+
+    if (ret)
+        ret += addr & ~PAGE_MASK;
+    return ret;
+}
+
+/*
+ * Unmap an IO region and remove it from vmalloc'd list.
+ * Access to IO memory should be serialized by driver.
+ */
+void iounmap(volatile void __iomem *token)
+{
+    void *addr;
+
+    if (!slab_is_available())
+        return;
+
+    addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK);
+
+    if ((unsigned long)addr < ioremap_bot) {
+        pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n", addr);
+        return;
+    }
+    vunmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 102901a19f3c..8ec5dfb65b2e 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -27,7 +27,6 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
-#include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 
@@ -35,104 +34,6 @@
 
 extern char etext[], _stext[], _sinittext[], _einittext[];
 
-void __iomem *
-ioremap_wt(phys_addr_t addr, unsigned long size)
-{
-    pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
-
-    return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_wt);
-
-void __iomem *
-__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
-{
-    unsigned long v, i;
-    phys_addr_t p;
-    int err;
-
-    /*
-     * Choose an address to map it to.
-     * Once the vmalloc system is running, we use it.
-     * Before then, we use space going down from IOREMAP_TOP
-     * (ioremap_bot records where we're up to).
-     */
-    p = addr & PAGE_MASK;
-    size = PAGE_ALIGN(addr + size) - p;
-
-    /*
-     * If the address lies within the first 16 MB, assume it's in ISA
-     * memory space
-     */
-    if (p < 16*1024*1024)
-        p += _ISA_MEM_BASE;
-
-#ifndef CONFIG_CRASH_DUMP
-    /*
-     * Don't allow anybody to remap normal RAM that we're using.
-     * mem_init() sets high_memory so only do the check after that.
-     */
-    if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
-        page_is_ram(__phys_to_pfn(p))) {
-        pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
-            (unsigned long long)p, __builtin_return_address(0));
-        return NULL;
-    }
-#endif
-
-    if (size == 0)
-        return NULL;
-
-    /*
-     * Is it already mapped? Perhaps overlapped by a previous
-     * mapping.
-     */
-    v = p_block_mapped(p);
-    if (v)
-        goto out;
-
-    if (slab_is_available()) {
-        struct vm_struct *area;
-        area = get_vm_area_caller(size, VM_IOREMAP, caller);
-        if (area == 0)
-            return NULL;
-        area->phys_addr = p;
-        v = (unsigned long) area->addr;
-    } else {
-        v = (ioremap_bot -= size);
-    }
-
-    /*
-     * Should check if it is a candidate for a BAT mapping
-     */
-
-    err = 0;
-    for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-        err = map_kernel_page(v + i, p + i, prot);
-    if (err) {
-        if (slab_is_available())
-            vunmap((void *)v);
-        return NULL;
-    }
-
-out:
-    return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-    /*
-     * If mapped by BATs then there is nothing to do.
-     * Calling vfree() generates a benign warning.
-     */
-    if (v_block_mapped((unsigned long)addr))
-        return;
-
-    if (addr > high_memory && (unsigned long) addr < ioremap_bot)
-        vunmap((void *) (PAGE_MASK & (unsigned long)addr));
-}
-EXPORT_SYMBOL(iounmap);
-
 static void __init *early_alloc_pgtable(unsigned long size)
 {
     void *ptr = memblock_alloc(size, size);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d865e053052d..e78832dce7bb 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * This file contains ioremap and related functions for 64-bit machines.
+ * This file contains pgtable related functions for 64-bit machines.
  *
  * Derived from arch/ppc64/mm/init.c
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -34,7 +34,6 @@
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -100,131 +99,6 @@ unsigned long __pte_frag_size_shift;
 EXPORT_SYMBOL(__pte_frag_size_shift);
 #endif
 
-int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
-{
-    unsigned long i;
-
-    for (i = 0; i < size; i += PAGE_SIZE) {
-        int err = map_kernel_page(ea + i, pa + i, prot);
-        if (err) {
-            if (slab_is_available())
-                unmap_kernel_range(ea, size);
-            else
-                WARN_ON_ONCE(1); /* Should clean up */
-            return err;
-        }
-    }
-
-    return 0;
-}
-
-/**
- * __ioremap_at - Low level function to establish the page tables
- *                for an IO mapping
- */
-void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
-{
-    /* We don't support the 4K PFN hack with ioremap */
-    if (pgprot_val(prot) & H_PAGE_4K_PFN)
-        return NULL;
-
-    if ((ea + size) >= (void *)IOREMAP_END) {
-        pr_warn("Outside the supported range\n");
-        return NULL;
-    }
-
-    WARN_ON(pa & ~PAGE_MASK);
-    WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
-    WARN_ON(size & ~PAGE_MASK);
-
-    if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
-        return NULL;
-
-    return (void __iomem *)ea;
-}
-
-/**
- * __iounmap_from - Low level function to tear down the page tables
- *                  for an IO mapping. This is used for mappings that
- *                  are manipulated manually, like partial unmapping of
- *                  PCI IOs or ISA space.
- */
-void __iounmap_at(void *ea, unsigned long size)
-{
-    WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
-    WARN_ON(size & ~PAGE_MASK);
-
-    unmap_kernel_range((unsigned long)ea, size);
-}
-
-void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
-                pgprot_t prot, void *caller)
-{
-    phys_addr_t paligned;
-    void __iomem *ret;
-
-    /*
-     * Choose an address to map it to.
-     * Once the imalloc system is running, we use it.
-     * Before that, we map using addresses going
-     * up from ioremap_bot. imalloc will use
-     * the addresses from ioremap_bot through
-     * IMALLOC_END
-     *
-     */
-    paligned = addr & PAGE_MASK;
-    size = PAGE_ALIGN(addr + size) - paligned;
-
-    if ((size == 0) || (paligned == 0))
-        return NULL;
-
-    if (slab_is_available()) {
-        struct vm_struct *area;
-
-        area = __get_vm_area_caller(size, VM_IOREMAP,
-                        ioremap_bot, IOREMAP_END,
-                        caller);
-        if (area == NULL)
-            return NULL;
-
-        area->phys_addr = paligned;
-        ret = __ioremap_at(paligned, area->addr, size, prot);
-    } else {
-        ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
-        if (ret)
-            ioremap_bot += size;
-    }
-
-    if (ret)
-        ret += addr & ~PAGE_MASK;
-    return ret;
-}
-
-/*
- * Unmap an IO region and remove it from imalloc'd list.
- * Access to IO memory should be serialized by driver.
- */
-void iounmap(volatile void __iomem *token)
-{
-    void *addr;
-
-    if (!slab_is_available())
-        return;
-
-    addr = (void *) ((unsigned long __force)
-             PCI_FIX_ADDR(token) & PAGE_MASK);
-    if ((unsigned long)addr < ioremap_bot) {
-        printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
-               " at 0x%p\n", addr);
-        return;
-    }
-    vunmap(addr);
-}
-
-EXPORT_SYMBOL(__ioremap_at);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(__iounmap_at);
-
 #ifndef __PAGETABLE_PUD_FOLDED
 /* 4 level page table */
 struct page *pgd_page(pgd_t pgd)