author    Joonsoo Kim <js1304@gmail.com>    2013-02-09 00:28:05 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>    2013-02-16 12:54:22 -0500
commit    ed8fd2186a4e4f3b98434093b56f9b793d48443e (patch)
tree      a38246555389c59db8711ce60d2983c4269c054f /arch/arm/mm
parent    48dc369d21b41f6400d15cdaf9411e2e6fd62323 (diff)
ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area
In the current implementation, the ARM-specific flag VM_ARM_STATIC_MAPPING is used to distinguish statically mapped areas. The purpose of a static mapped area is to be re-used when the entire physical address range of an ioremap request can be covered by it.

This implementation causes needless overhead in some cases. For example, assume there is only one static mapped area and vmlist holds 300 areas: every ioremap call walks all 300 areas to decide whether one matches. Even when there is no static mapped area at all, every ioremap call still walks those 300 areas. If we construct an extra list for static mapped areas, we can eliminate this overhead: with such a list, a single static mapped area means we check only one entry and proceed quickly. This is not a critical problem, since ioremap is not called frequently, but reducing the overhead is still worthwhile.

Another reason for this work is to remove an architecture dependency on the vmalloc layer. vmlist and vmlist_lock are internal data structures of the vmalloc layer; some debugging and statistics code inevitably uses them, but they should be used as little as possible outside of vmalloc.c.

This patch introduces an ARM-specific infrastructure for static mapped areas. The following patch will use it to resolve the problem described above.

Reviewed-by: Nicolas Pitre <nico@linaro.org>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
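For illustration, here is a rough sketch (not part of this patch; the commit message defers the actual ioremap conversion to the following patch) of how a lookup against the new list could short-circuit a fresh vmalloc-space allocation when the requested physical range is already covered by a static mapping. The function name remap_or_reuse and its return convention are assumptions; it builds only on the helpers added below.

/*
 * Illustration only: reuse an existing static mapping that fully covers
 * the requested physical range; otherwise signal the caller to fall
 * back to the normal ioremap allocation path.
 * (remap_or_reuse is a hypothetical name, not introduced by the patch.)
 */
static void __iomem *remap_or_reuse(phys_addr_t paddr, size_t size,
				    unsigned int mtype)
{
	struct static_vm *svm;
	unsigned long addr;

	svm = find_static_vm_paddr(paddr, size, mtype);
	if (!svm)
		return NULL;	/* caller falls back to the normal path */

	/* Return an offset into the existing static mapping. */
	addr = (unsigned long)svm->vm.addr;
	addr += paddr - svm->vm.phys_addr;
	return (void __iomem *)addr;
}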
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/ioremap.c  64
-rw-r--r--  arch/arm/mm/mm.h       12
2 files changed, 76 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88fd86cf3d9a..904c15e86063 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -39,6 +39,70 @@
 #include <asm/mach/pci.h>
 #include "mm.h"
 
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+			size_t size, unsigned int mtype)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+
+		if (vm->phys_addr > paddr ||
+			paddr + size - 1 > vm->phys_addr + vm->size - 1)
+			continue;
+
+		return svm;
+	}
+
+	return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+
+		/* static_vmlist is ascending order */
+		if (vm->addr > vaddr)
+			break;
+
+		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+			return svm;
+	}
+
+	return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+	struct static_vm *curr_svm;
+	struct vm_struct *vm;
+	void *vaddr;
+
+	vm = &svm->vm;
+	vm_area_add_early(vm);
+	vaddr = vm->addr;
+
+	list_for_each_entry(curr_svm, &static_vmlist, list) {
+		vm = &curr_svm->vm;
+
+		if (vm->addr > vaddr)
+			break;
+	}
+	list_add_tail(&svm->list, &curr_svm->list);
+}
+
 int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
 {
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a8ee92da3544..d5a4e9ad8f0f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -1,4 +1,6 @@
 #ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
 
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* consistent regions used by dma_alloc_attrs() */
 #define VM_ARM_DMA_CONSISTENT	0x20000000
 
+
+struct static_vm {
+	struct vm_struct vm;
+	struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
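For completeness, a minimal sketch of how an early-boot caller might register a fixed mapping on the new list with add_static_vm_early(). The addresses, the size, the board_register_static_io() name and the statically allocated static_vm are made up for the example; in the actual series the registration is expected to come from the mapping setup code converted in a later patch, not from board files.

/*
 * Illustration only -- not part of this patch. Registers one assumed
 * fixed device mapping on static_vmlist during early boot, so that
 * later ioremap requests covered by it can be satisfied without a new
 * vmalloc-space allocation.
 */
static struct static_vm board_uart_svm = {
	.vm = {
		.addr		= (void *)0xf8000000,	/* assumed virtual base */
		.phys_addr	= 0x10000000,		/* assumed physical base */
		.size		= SZ_1M,
		.flags		= VM_IOREMAP | VM_ARM_STATIC_MAPPING |
				  VM_ARM_MTYPE(MT_DEVICE),
	},
};

static void __init board_register_static_io(void)
{
	add_static_vm_early(&board_uart_svm);
}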