author     Russell King <rmk+kernel@arm.linux.org.uk>   2009-11-19 10:31:07 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2009-11-24 12:41:34 -0500
commit     7a9a32a9533fa01de911e1d056142ddd27360782
tree       2fcd22e74333d83a81586101ea622e9b197c13ea   /arch/arm/mm/dma-mapping.c
parent     13ccf3ad99a45052664f2c1a6c64899f9d778152
ARM: dma-mapping: functions to allocate/free a coherent buffer
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Greg Ungerer <gerg@uclinux.org>
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--   arch/arm/mm/dma-mapping.c | 110
1 file changed, 66 insertions(+), 44 deletions(-)
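
For orientation before reading the diff: the patch factors the buffer handling out of __dma_alloc() and dma_free_coherent() into two helpers. The prototypes below are copied from the additions in the diff; the comments are an editorial summary of their contracts, not text from the patch.

/* Allocate 'size' bytes (must be page aligned) of zeroed, cache-clean pages
 * for 'dev', honouring the device's coherent DMA mask.  Returns the first
 * struct page, or NULL on failure. */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp);

/* Release a buffer obtained from __dma_alloc_buffer(); 'size' must be the
 * same page-aligned size that was allocated. */
static void __dma_free_buffer(struct page *page, size_t size);
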
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c54f1acf92c8..dab2d7f04adf 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -63,6 +63,68 @@ static u64 get_coherent_dma_mask(struct device *dev)
         return mask;
 }
 
+/*
+ * Allocate a DMA buffer for 'dev' of size 'size' using the
+ * specified gfp mask. Note that 'size' must be page aligned.
+ */
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+        unsigned long order = get_order(size);
+        struct page *page, *p, *e;
+        void *ptr;
+        u64 mask = get_coherent_dma_mask(dev);
+
+#ifdef CONFIG_DMA_API_DEBUG
+        u64 limit = (mask + 1) & ~mask;
+        if (limit && size >= limit) {
+                dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+                        size, mask);
+                return NULL;
+        }
+#endif
+
+        if (!mask)
+                return NULL;
+
+        if (mask < 0xffffffffULL)
+                gfp |= GFP_DMA;
+
+        page = alloc_pages(gfp, order);
+        if (!page)
+                return NULL;
+
+        /*
+         * Now split the huge page and free the excess pages
+         */
+        split_page(page, order);
+        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
+                __free_page(p);
+
+        /*
+         * Ensure that the allocated pages are zeroed, and that any data
+         * lurking in the kernel direct-mapped region is invalidated.
+         */
+        ptr = page_address(page);
+        memset(ptr, 0, size);
+        dmac_flush_range(ptr, ptr + size);
+        outer_flush_range(__pa(ptr), __pa(ptr) + size);
+
+        return page;
+}
+
+/*
+ * Free a DMA buffer. 'size' must be page aligned.
+ */
+static void __dma_free_buffer(struct page *page, size_t size)
+{
+        struct page *e = page + (size >> PAGE_SHIFT);
+
+        while (page < e) {
+                __free_page(page);
+                page++;
+        }
+}
+
 #ifdef CONFIG_MMU
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
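
Note (not part of the patch): the CONFIG_DMA_API_DEBUG check in the new __dma_alloc_buffer() uses limit = (mask + 1) & ~mask to turn the device's coherent DMA mask into the power-of-two byte limit it implies; a full 64-bit mask wraps to 0, which disables the check. Below is a minimal user-space sketch of that arithmetic; the helper name coherent_limit() and the example masks are made up for illustration and are not kernel code.

/* Illustration only: how the size check in __dma_alloc_buffer() behaves
 * for a few representative DMA masks. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint64_t coherent_limit(uint64_t mask)
{
        /* Same expression as the patch: the byte limit implied by the mask,
         * or 0 when the mask covers the whole address space. */
        return (mask + 1) & ~mask;
}

int main(void)
{
        /* 24-bit mask: limit is 16MB, so a 16MB coherent request is refused. */
        printf("mask 0x00ffffff -> limit %#" PRIx64 "\n", coherent_limit(0x00ffffffULL));
        /* 32-bit mask: limit is 4GB. */
        printf("mask 0xffffffff -> limit %#" PRIx64 "\n", coherent_limit(0xffffffffULL));
        /* Full 64-bit mask: mask + 1 wraps to 0, so the check is disabled. */
        printf("mask ~0ULL      -> limit %#" PRIx64 "\n", coherent_limit(~0ULL));
        return 0;
}

Built with any C99 compiler, this prints a 16MB limit for the 24-bit mask, a 4GB limit for the 32-bit mask, and 0 for the all-ones mask.
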
@@ -88,9 +150,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 {
         struct page *page;
         struct arm_vmregion *c;
-        unsigned long order;
-        u64 mask = get_coherent_dma_mask(dev);
-        u64 limit;
 
         if (!consistent_pte[0]) {
                 printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -98,53 +157,25 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
                 return NULL;
         }
 
-        if (!mask)
-                goto no_page;
-
         size = PAGE_ALIGN(size);
-        limit = (mask + 1) & ~mask;
-        if (limit && size >= limit) {
-                printk(KERN_WARNING "coherent allocation too big "
-                       "(requested %#x mask %#llx)\n", size, mask);
-                goto no_page;
-        }
-
-        order = get_order(size);
-
-        if (mask < 0xffffffffULL)
-                gfp |= GFP_DMA;
 
-        page = alloc_pages(gfp, order);
+        page = __dma_alloc_buffer(dev, size, gfp);
         if (!page)
                 goto no_page;
 
         /*
-         * Invalidate any data that might be lurking in the
-         * kernel direct-mapped region for device DMA.
-         */
-        {
-                void *ptr = page_address(page);
-                memset(ptr, 0, size);
-                dmac_flush_range(ptr, ptr + size);
-                outer_flush_range(__pa(ptr), __pa(ptr) + size);
-        }
-
-        /*
          * Allocate a virtual address in the consistent mapping region.
          */
         c = arm_vmregion_alloc(&consistent_head, size,
                             gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
         if (c) {
                 pte_t *pte;
-                struct page *end = page + (1 << order);
                 int idx = CONSISTENT_PTE_INDEX(c->vm_start);
                 u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
                 pte = consistent_pte[idx] + off;
                 c->vm_pages = page;
 
-                split_page(page, order);
-
                 /*
                  * Set the "dma handle"
                  */
@@ -167,19 +198,11 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
                         }
                 } while (size -= PAGE_SIZE);
 
-                /*
-                 * Free the otherwise unused pages.
-                 */
-                while (page < end) {
-                        __free_page(page);
-                        page++;
-                }
-
                 return (void *)c->vm_start;
         }
 
         if (page)
-                __free_pages(page, order);
+                __dma_free_buffer(page, size);
 no_page:
         *handle = ~0;
         return NULL;
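
Note (not part of the patch): the error path above switches from __free_pages(page, order) to __dma_free_buffer(page, size) because __dma_alloc_buffer() has already called split_page() and freed the pages beyond 'size', so only size >> PAGE_SHIFT order-0 pages remain and freeing must be driven by the size, not by the original allocation order. The user-space sketch below models that accounting; PAGE_SHIFT and the simplified order_for() are stand-ins for the kernel's definitions.

/* Illustration only: page accounting that makes size-based freeing
 * (rather than order-based freeing) correct. */
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumption: 4K pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified stand-in for the kernel's get_order(). */
static unsigned int order_for(unsigned long size)
{
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned int order = 0;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned long size = 3 * PAGE_SIZE;     /* page-aligned request */
        unsigned int order = order_for(size);

        /* alloc_pages() returns 1 << order pages; __dma_alloc_buffer() keeps
         * size >> PAGE_SHIFT of them and frees the rest up front, so the
         * matching free must release exactly size >> PAGE_SHIFT pages. */
        printf("kept %lu page(s), allocated %lu, freed as excess %lu\n",
               size >> PAGE_SHIFT, 1UL << order,
               (1UL << order) - (size >> PAGE_SHIFT));
        return 0;
}

For a three-page request this prints that four pages were allocated (order 2), three are kept, and one is freed as excess up front; __dma_free_buffer() later releases only the three that were kept.
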
@@ -357,12 +380,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
                                  * x86 does not mark the pages reserved...
                                  */
                                 ClearPageReserved(page);
-
-                                __free_page(page);
                                 continue;
                         }
                 }
-
                 printk(KERN_CRIT "%s: bad page in kernel page table\n",
                        __func__);
         } while (size -= PAGE_SIZE);
@@ -370,6 +390,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
         flush_tlb_kernel_range(c->vm_start, c->vm_end);
 
         arm_vmregion_free(&consistent_head, c);
+
+        __dma_free_buffer(dma_to_page(dev, handle), size);
         return;
 
 no_area: