 arch/x86/mm/ioremap.c | 35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 2ac09a5822cb..20c01f2b2e11 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -19,6 +19,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
+#include <asm/pat.h>
 
 #ifdef CONFIG_X86_64
 
@@ -118,6 +119,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 {
 	unsigned long pfn, offset, last_addr, vaddr;
 	struct vm_struct *area;
+	unsigned long new_prot_val;
 	pgprot_t prot;
 
 	/* Don't allow wraparound or zero size */
@@ -151,6 +153,28 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 		WARN_ON_ONCE(is_ram);
 	}
 
+	/*
+	 * Mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+	if (reserve_memtype(phys_addr, phys_addr + size,
+			    prot_val, &new_prot_val)) {
+		/*
+		 * Do not fallback to certain memory types with certain
+		 * requested type:
+		 * - request is uncached, return cannot be write-back
+		 */
+		if ((prot_val == _PAGE_CACHE_UC &&
+		     new_prot_val == _PAGE_CACHE_WB)) {
+			free_memtype(phys_addr, phys_addr + size);
+			return NULL;
+		}
+		prot_val = new_prot_val;
+	}
+
 	switch (prot_val) {
 	case _PAGE_CACHE_UC:
 	default:
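
Note on the hunk above: reserve_memtype() may hand back a memory type other than the one requested, and the patch accepts the substitution except for one combination, namely that an uncached (UC) request must not be silently weakened to write-back (WB). A minimal userspace sketch of that one-way rule, with the _PAGE_CACHE_* constants replaced by a stand-in enum (all names here are illustrative, not the kernel's):

	#include <stdbool.h>
	#include <stdio.h>

	enum cache_type { CACHE_UC, CACHE_WB, CACHE_WC };

	/* A UC request must never degrade to WB; other fallbacks pass. */
	static bool fallback_ok(enum cache_type req, enum cache_type got)
	{
		return !(req == CACHE_UC && got == CACHE_WB);
	}

	int main(void)
	{
		printf("UC -> WB ok? %d\n", fallback_ok(CACHE_UC, CACHE_WB)); /* 0 */
		printf("WB -> UC ok? %d\n", fallback_ok(CACHE_WB, CACHE_UC)); /* 1 */
		return 0;
	}
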
@@ -162,13 +186,6 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	}
 
 	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-	/*
 	 * Ok, go for it..
 	 */
 	area = get_vm_area(size, VM_IOREMAP);
@@ -177,11 +194,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+		free_memtype(phys_addr, phys_addr + size);
 		free_vm_area(area);
 		return NULL;
 	}
 
 	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+		free_memtype(phys_addr, phys_addr + size);
 		vunmap(area->addr);
 		return NULL;
 	}
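
Note on the hunk above: once the memtype reservation is taken, every failure exit from __ioremap() has to release it, which is why both error paths gain a free_memtype() call. A hedged userspace sketch of the same unwind invariant, written with the goto-cleanup idiom common in kernel code; every function below is a stub and none of these names come from the patch:

	#include <stdio.h>

	static int  reserve_stub(void) { puts("reserve"); return 0; }
	static void free_stub(void)    { puts("free"); }
	static int  map_stub(void)     { puts("map"); return -1; } /* simulated failure */

	static void *remap_stub(void)
	{
		static char mapping;

		if (reserve_stub())
			return NULL;	/* nothing reserved yet, nothing to undo */
		if (map_stub())
			goto err_free;	/* every later failure unwinds the reservation */
		return &mapping;
	err_free:
		free_stub();
		return NULL;
	}

	int main(void)
	{
		printf("mapping %s\n", remap_stub() ? "succeeded" : "failed");
		return 0;
	}
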
@@ -265,6 +284,8 @@ void iounmap(volatile void __iomem *addr)
 		return;
 	}
 
+	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
+
 	/* Finally remove it */
 	o = remove_vm_area((void *)addr);
 	BUG_ON(p != o || o == NULL);
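
Note on the final hunk: iounmap() now drops the reservation that __ioremap() took, so the reserve/free pairing stays invisible to callers; a driver keeps using the same two calls it always did. A kernel-context sketch of that unchanged usage (not standalone code, and the MYDEV_* base and length values are hypothetical):

	/* Kernel-context sketch; MYDEV_* values are made up for illustration. */
	#include <linux/errno.h>
	#include <linux/io.h>

	#define MYDEV_MMIO_BASE	0xfebf0000UL	/* hypothetical MMIO base */
	#define MYDEV_MMIO_LEN	0x1000UL

	static void __iomem *mydev_regs;

	static int mydev_map(void)
	{
		/* reserve_memtype() now runs inside the ioremap path */
		mydev_regs = ioremap(MYDEV_MMIO_BASE, MYDEV_MMIO_LEN);
		if (!mydev_regs)
			return -ENOMEM;
		return 0;
	}

	static void mydev_unmap(void)
	{
		/* ...and free_memtype() runs here, via iounmap() */
		iounmap(mydev_regs);
	}
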