author:    Joonsoo Kim <js1304@gmail.com>  2013-02-09 00:28:06 -0500
committer: Russell King <rmk+kernel@arm.linux.org.uk>  2013-02-16 12:54:22 -0500
commit:    101eeda38c0ab8a4f916176e325d9e036d981a24
tree:      874b1f0002c9153e6af28b1ec3dd6aa0e7737086
parent:    ed8fd2186a4e4f3b98434093b56f9b793d48443e
ARM: 7646/1: mm: use static_vm for managing static mapped areas
A static mapped area is ARM-specific, so it is better not to use the generic vmalloc data structures, that is, vmlist and vmlist_lock, to manage it; doing so adds needless overhead that is worth removing. With the newly introduced static_vm infrastructure we no longer need to iterate over all mapped areas; instead we iterate only over the static mapped areas, which reduces the cost of finding a matching area. It also removes the architecture dependency on the vmalloc layer, which helps the maintainability of that layer.

Reviewed-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Rob Herring <rob.herring@calxeda.com>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
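[Editor's note] The static_vm infrastructure referred to above is introduced by the companion patch in this series (ARM: 7645/1: mm: add infrastructure for static mapped area). The sketch below shows roughly what the interface used by this diff looks like; find_static_vm_paddr() and find_static_vm_vaddr() are the helpers actually called in the diff, while the struct layout and add_static_vm_early() are assumptions based on that companion patch and may differ in detail from the real declarations in arch/arm/mm/mm.h.

	/*
	 * Rough sketch of the static_vm interface this patch builds on
	 * (assumed from the companion patch, not part of this diff).
	 */
	struct static_vm {
		struct vm_struct vm;	/* svm->vm.addr / svm->vm.phys_addr are used in the diff */
		struct list_head list;	/* entry in the list of static mappings (assumed) */
	};

	extern struct static_vm *find_static_vm_vaddr(void *vaddr);
	extern struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
				size_t size, unsigned int mtype);
	extern void __init add_static_vm_early(struct static_vm *svm);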
Diffstat (limited to 'arch/arm/mm/ioremap.c')
-rw-r--r--  arch/arm/mm/ioremap.c | 71
1 file changed, 29 insertions(+), 42 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 904c15e86063..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
 
@@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -312,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+		err = ioremap_page_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -410,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
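[Editor's note] For reference, the physical-address lookup that replaces the old vmlist walk in __arm_ioremap_pfn_caller() might look roughly like the sketch below. This is an illustration only: the list name static_vmlist and the exact checks are assumptions based on the companion patch that introduces the helpers, not part of this diff.

	/*
	 * Sketch of find_static_vm_paddr(): walk only the static mappings and
	 * return the one whose physical range fully covers [paddr, paddr + size)
	 * with the requested memory type.  Names such as static_vmlist are
	 * assumed from the companion patch and shown here for illustration.
	 */
	struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
				size_t size, unsigned int mtype)
	{
		struct static_vm *svm;
		struct vm_struct *vm;

		list_for_each_entry(svm, &static_vmlist, list) {
			vm = &svm->vm;

			/* only reuse a mapping created with the same memory type */
			if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
				continue;

			/* the requested range must lie entirely inside this mapping */
			if (vm->phys_addr > paddr ||
			    paddr + size - 1 > vm->phys_addr + vm->size - 1)
				continue;

			return svm;
		}

		return NULL;
	}

Because only static mappings live on this list, the walk stays short and no vmlist_lock-style locking is needed at ioremap time, which is the overhead reduction the commit message describes.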