author		Thomas Gleixner <tglx@linutronix.de>	2010-02-17 12:27:37 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2010-02-17 12:28:05 -0500
commit		b7e56edba4b02f2079042c326a8cd72a44635817 (patch)
tree		b5042002e9747cd8fb1278d61f86d8b92a74c018 /arch/x86/mm/ioremap.c
parent		13ca0fcaa33f6b1984c4111b6ec5df42689fea6f (diff)
parent		b0483e78e5c4c9871fc5541875b3bc006846d46b (diff)
Merge branch 'linus' into x86/mm
x86/mm is on 32-rc4 and missing the spinlock namespace changes which are needed for further commits into this topic.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/ioremap.c')
-rw-r--r--	arch/x86/mm/ioremap.c	50
1 file changed, 12 insertions(+), 38 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 1bf9e08ed733..e404ffe30210 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -133,8 +133,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 				(unsigned long long)phys_addr,
 				(unsigned long long)(phys_addr + size),
 				prot_val, new_prot_val);
-			free_memtype(phys_addr, phys_addr + size);
-			return NULL;
+			goto err_free_memtype;
 		}
 		prot_val = new_prot_val;
 	}
@@ -160,26 +159,25 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	 */
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
-		return NULL;
+		goto err_free_memtype;
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
 
-	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
-		free_memtype(phys_addr, phys_addr + size);
-		free_vm_area(area);
-		return NULL;
-	}
+	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
+		goto err_free_area;
 
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
-		free_memtype(phys_addr, phys_addr + size);
-		free_vm_area(area);
-		return NULL;
-	}
+	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
+		goto err_free_area;
 
 	ret_addr = (void __iomem *) (vaddr + offset);
 	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
 
 	return ret_addr;
+err_free_area:
+	free_vm_area(area);
+err_free_memtype:
+	free_memtype(phys_addr, phys_addr + size);
+	return NULL;
 }
 
 /**
@@ -246,30 +244,6 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
-static void __iomem *ioremap_default(resource_size_t phys_addr,
-					unsigned long size)
-{
-	unsigned long flags;
-	void __iomem *ret;
-	int err;
-
-	/*
-	 * - WB for WB-able memory and no other conflicting mappings
-	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
-	 * - Inherit from confliting mappings otherwise
-	 */
-	err = reserve_memtype(phys_addr, phys_addr + size,
-				_PAGE_CACHE_WB, &flags);
-	if (err < 0)
-		return NULL;
-
-	ret = __ioremap_caller(phys_addr, size, flags,
-			       __builtin_return_address(0));
-
-	free_memtype(phys_addr, phys_addr + size);
-	return ret;
-}
-
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
 				unsigned long prot_val)
 {
@@ -345,7 +319,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
 	if (page_is_ram(start >> PAGE_SHIFT))
 		return __va(phys);
 
-	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
+	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
 	if (addr)
 		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 
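The __ioremap_caller() hunks above collapse three duplicated failure paths into a single unwind sequence at the bottom of the function, with resources released in reverse order of acquisition. The following is a minimal, self-contained sketch of that goto-unwind pattern, not kernel code: the struct and helpers are invented for illustration, and only the label names err_free_area and err_free_memtype mirror the diff.

/* Illustrative sketch only: the two "resources" here stand in for the
 * memtype reservation and the vm_area that __ioremap_caller() manages;
 * none of this is a kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct fake_mapping {
	int   memtype_reserved;	/* stands in for reserve_memtype() */
	void *area;		/* stands in for get_vm_area_caller() */
};

static struct fake_mapping *fake_ioremap(size_t size, int fail_late)
{
	struct fake_mapping *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->memtype_reserved = 1;	/* first resource acquired */

	m->area = malloc(size);		/* second resource acquired */
	if (!m->area)
		goto err_free_memtype;	/* only the first resource needs undoing */

	if (fail_late)			/* stands in for ioremap_page_range() failing */
		goto err_free_area;	/* both resources need undoing */

	return m;

err_free_area:
	free(m->area);			/* undo the later acquisition first */
err_free_memtype:
	m->memtype_reserved = 0;	/* then fall through to undo the earlier one */
	free(m);
	return NULL;
}

int main(void)
{
	struct fake_mapping *ok  = fake_ioremap(64, 0);
	struct fake_mapping *bad = fake_ioremap(64, 1);

	printf("success path: %s, failure path: %s\n",
	       ok ? "mapped" : "NULL", bad ? "mapped" : "NULL");

	if (ok) {
		free(ok->area);
		free(ok);
	}
	return 0;
}

As in the kernel change, the point of the pattern is that every failure branch reaches exactly one cleanup sequence, so later fixes to the error path only have to be made in one place.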