author     Joerg Roedel <joerg.roedel@amd.com>    2008-08-19 10:32:44 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-08-22 02:34:50 -0400
commit     c647c3bb2d16246a87f49035985ddb7c1eb030df (patch)
tree       881479b30af77332bbc24e5340a81c02b0aad04f /arch/x86/kernel/pci-dma.c
parent     a3a76532e0caa093c279806d8fe8608232538af0 (diff)
x86: cleanup dma_*_coherent functions
All dma_ops implementations now support the alloc_coherent and free_coherent
callbacks. This allows a big simplification of the dma_alloc_coherent
function, which this patch implements. The dma_free_coherent function is also
cleaned up and now calls the free_coherent callback of the dma_ops
implementation.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r--  arch/x86/kernel/pci-dma.c  121
1 file changed, 12 insertions(+), 109 deletions(-)
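To make the shape of the cleanup concrete, below is a minimal sketch of the
kind of dma_ops implementation the simplified generic functions dispatch to.
The callback signatures and the helpers used (alloc_pages_node(),
dev_to_node(), page_to_phys(), page_address(), get_order(), free_pages())
are the ones visible in the patch itself; the example_* names are
hypothetical, and a real backend (nommu, swiotlb, GART, ...) does
considerably more:

	/* Illustrative only; assumes the usual <linux/dma-mapping.h>
	 * environment of this tree. */
	static void *example_alloc_coherent(struct device *dev, size_t size,
					    dma_addr_t *dma_handle, gfp_t gfp)
	{
		/* Allocate near the device's NUMA node, as the removed
		 * dma_alloc_pages() helper used to. */
		struct page *page = alloc_pages_node(dev_to_node(dev), gfp,
						     get_order(size));
		if (!page)
			return NULL;

		memset(page_address(page), 0, size);
		*dma_handle = page_to_phys(page);
		return page_address(page);
	}

	static void example_free_coherent(struct device *dev, size_t size,
					  void *vaddr, dma_addr_t dma_handle)
	{
		free_pages((unsigned long)vaddr, get_order(size));
	}

	static struct dma_mapping_ops example_dma_ops = {
		.alloc_coherent	= example_alloc_coherent,
		.free_coherent	= example_free_coherent,
		/* other callbacks (map_single, unmap_single, ...) omitted */
	};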
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 87d4d6964ec2..613332b26e31 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -241,33 +241,15 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);

-/* Allocate DMA memory on node near device */
-static noinline struct page *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
-	int node;
-
-	node = dev_to_node(dev);
-
-	return alloc_pages_node(node, gfp, order);
-}
-
 /*
  * Allocate memory for a coherent mapping.
  */
 void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
-	void *memory = NULL;
-	struct page *page;
-	unsigned long dma_mask = 0;
-	dma_addr_t bus;
-	int noretry = 0;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+	void *memory;

 	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
 		return memory;
@@ -276,89 +258,10 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		dev = &fallback_dev;
 		gfp |= GFP_DMA;
 	}
-	dma_mask = dev->coherent_dma_mask;
-	if (dma_mask == 0)
-		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
-
-	/* Device not DMA able */
-	if (dev->dma_mask == NULL)
-		return NULL;
-
-	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
-	if (gfp & __GFP_DMA)
-		noretry = 1;
-
-#ifdef CONFIG_X86_64
-	/* Why <=? Even when the mask is smaller than 4GB it is often
-	   larger than 16MB and in this case we have a chance of
-	   finding fitting memory in the next higher zone first. If
-	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-		gfp |= GFP_DMA32;
-		if (dma_mask < DMA_32BIT_MASK)
-			noretry = 1;
-	}
-#endif

- again:
-	page = dma_alloc_pages(dev,
-		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
-	if (page == NULL)
-		return NULL;
-
-	{
-		int high, mmu;
-		bus = page_to_phys(page);
-		memory = page_address(page);
-		high = (bus + size) >= dma_mask;
-		mmu = high;
-		if (force_iommu && !(gfp & GFP_DMA))
-			mmu = 1;
-		else if (high) {
-			free_pages((unsigned long)memory,
-				   get_order(size));
-
-			/* Don't use the 16MB ZONE_DMA unless absolutely
-			   needed. It's better to use remapping first. */
-			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-				goto again;
-			}
-
-			/* Let low level make its own zone decisions */
-			gfp &= ~(GFP_DMA32|GFP_DMA);
-
-			if (ops->alloc_coherent)
-				return ops->alloc_coherent(dev, size,
-							   dma_handle, gfp);
-			return NULL;
-		}
-
-		memset(memory, 0, size);
-		if (!mmu) {
-			*dma_handle = bus;
-			return memory;
-		}
-	}
-
-	if (ops->alloc_coherent) {
-		free_pages((unsigned long)memory, get_order(size));
-		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return ops->alloc_coherent(dev, size, dma_handle, gfp);
-	}
-
-	if (ops->map_simple) {
-		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
-					      size,
-					      PCI_DMA_BIDIRECTIONAL);
-		if (*dma_handle != bad_dma_address)
-			return memory;
-	}
-
-	if (panic_on_overflow)
-		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
-		      (unsigned long)size);
-	free_pages((unsigned long)memory, get_order(size));
+	if (ops->alloc_coherent)
+		return ops->alloc_coherent(dev, size,
+					   dma_handle, gfp);
 	return NULL;
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
@@ -368,17 +271,17 @@ EXPORT_SYMBOL(dma_alloc_coherent);
  * The caller must ensure that the device has finished accessing the mapping.
  */
 void dma_free_coherent(struct device *dev, size_t size,
 		       void *vaddr, dma_addr_t bus)
 {
 	struct dma_mapping_ops *ops = get_dma_ops(dev);

-	int order = get_order(size);
-	WARN_ON(irqs_disabled());	/* for portability */
-	if (dma_release_from_coherent(dev, order, vaddr))
+	WARN_ON(irqs_disabled());	/* for portability */
+
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
 		return;
-	if (ops->unmap_single)
-		ops->unmap_single(dev, bus, size, 0);
-	free_pages((unsigned long)vaddr, order);
+
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, bus);
 }
 EXPORT_SYMBOL(dma_free_coherent);
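From a driver's point of view nothing changes with this patch: the
dma_alloc_coherent()/dma_free_coherent() entry points keep their signatures,
as the diff above shows. A hypothetical caller, where 'pdev' and the buffer
size are made up for illustration:

	dma_addr_t bus;
	void *cpu;

	cpu = dma_alloc_coherent(&pdev->dev, 4096, &bus, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;		/* hypothetical error path */
	/* ... program the device with 'bus', access the buffer via 'cpu' ... */
	dma_free_coherent(&pdev->dev, 4096, cpu, bus);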