 arch/arm/mm/dma-mapping.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 67960017dc8f..310e479309ef 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(consistent_lock);
  * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
  * would have to initialise this each time prior to calling vm_region_alloc().
  */
-struct vm_region {
+struct arm_vm_region {
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
@@ -79,20 +79,20 @@ struct vm_region {
 	int			vm_active;
 };
 
-static struct vm_region consistent_head = {
+static struct arm_vm_region consistent_head = {
 	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
 	.vm_start	= CONSISTENT_BASE,
 	.vm_end		= CONSISTENT_END,
 };
 
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct arm_vm_region *
+arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
-	struct vm_region *c, *new;
+	struct arm_vm_region *c, *new;
 
-	new = kmalloc(sizeof(struct vm_region), gfp);
+	new = kmalloc(sizeof(struct arm_vm_region), gfp);
 	if (!new)
 		goto out;
 
@@ -127,9 +127,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 	return NULL;
 }
 
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
 {
-	struct vm_region *c;
+	struct arm_vm_region *c;
 
 	list_for_each_entry(c, &head->vm_list, vm_list) {
 		if (c->vm_active && c->vm_start == addr)
@@ -149,7 +149,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
 {
 	struct page *page;
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	unsigned long order;
 	u64 mask = ISA_DMA_THRESHOLD, limit;
 
@@ -214,7 +214,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = vm_region_alloc(&consistent_head, size,
+	c = arm_vm_region_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		pte_t *pte;
@@ -311,13 +311,13 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
 	unsigned long flags, user_size, kern_size;
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	int ret = -ENXIO;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&consistent_lock, flags);
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	spin_unlock_irqrestore(&consistent_lock, flags);
 
 	if (c) {
@@ -359,7 +359,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	unsigned long flags, addr;
 	pte_t *ptep;
 	int idx;
@@ -378,7 +378,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	size = PAGE_ALIGN(size);
 
 	spin_lock_irqsave(&consistent_lock, flags);
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	if (!c)
 		goto no_area;
 
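
For readers skimming the rename: arm_vm_region_alloc() and arm_vm_region_find() implement a small first-fit allocator over an address-sorted list of regions inside the consistent mapping window (CONSISTENT_BASE..CONSISTENT_END). The userspace C sketch below illustrates that pattern only and is not the kernel code: it substitutes a hand-rolled doubly linked list and malloc() for <linux/list.h>, kmalloc() and the consistent_lock spinlock, and the 0x1000/0x10000 arena bounds are invented stand-ins for CONSISTENT_BASE/CONSISTENT_END.

	/*
	 * Minimal sketch of the first-fit region allocator that
	 * arm_vm_region_alloc()/arm_vm_region_find() implement.
	 * Illustration only: plain doubly linked list, no locking.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct region {
		struct region *prev, *next;
		unsigned long start, end;
	};

	/* Sentinel head: start..end bounds the whole arena (made-up values). */
	static struct region head = { &head, &head, 0x1000, 0x10000 };

	static struct region *region_alloc(size_t size)
	{
		unsigned long addr = head.start, end = head.end - size;
		struct region *c, *new = malloc(sizeof(*new));

		if (!new || head.start + size > head.end)
			goto fail;

		/* Walk the sorted list for the first gap of at least size. */
		for (c = head.next; c != &head; c = c->next) {
			if (addr + size <= c->start)
				break;		/* gap before this region fits */
			addr = c->end;
			if (addr > end)
				goto fail;	/* ran past the arena */
		}

		/* Insert before c, keeping the list sorted by address. */
		new->start = addr;
		new->end = addr + size;
		new->prev = c->prev;
		new->next = c;
		c->prev->next = new;
		c->prev = new;
		return new;
	fail:
		free(new);
		return NULL;
	}

	static struct region *region_find(unsigned long addr)
	{
		struct region *c;

		/* Look a region up by its start address, as the kernel
		 * helpers do with the caller's cpu_addr. */
		for (c = head.next; c != &head; c = c->next)
			if (c->start == addr)
				return c;
		return NULL;
	}

	int main(void)
	{
		struct region *a = region_alloc(0x2000);
		struct region *b = region_alloc(0x1000);

		if (a && b)
			printf("a: %#lx-%#lx  b: %#lx-%#lx  find(a)=%p\n",
			       a->start, a->end, b->start, b->end,
			       (void *)region_find(a->start));
		return 0;
	}

Allocation walks the sorted list until the first sufficiently large gap, which is how __dma_alloc() above carves a virtual range out of the consistent region; lookup by start address is what dma_mmap() and dma_free_coherent() do with the caller's cpu_addr before unmapping or remapping it.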