about summary refs log tree commit diff stats
path: root/arch/x86/kernel/pci-dma.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r-- arch/x86/kernel/pci-dma.c | 20
1 files changed, 14 insertions, 6 deletions
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0c37f16b6950..dc00a1331ace 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -378,6 +378,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
378 struct page *page; 378 struct page *page;
379 unsigned long dma_mask = 0; 379 unsigned long dma_mask = 0;
380 dma_addr_t bus; 380 dma_addr_t bus;
381 int noretry = 0;
381 382
382 /* ignore region specifiers */ 383 /* ignore region specifiers */
383 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 384 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -385,30 +386,37 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
385 if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) 386 if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
386 return memory; 387 return memory;
387 388
388 if (!dev) 389 if (!dev) {
389 dev = &fallback_dev; 390 dev = &fallback_dev;
391 gfp |= GFP_DMA;
392 }
390 dma_mask = dev->coherent_dma_mask; 393 dma_mask = dev->coherent_dma_mask;
391 if (dma_mask == 0) 394 if (dma_mask == 0)
392 dma_mask = DMA_32BIT_MASK; 395 dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
393 396
394 /* Device not DMA able */ 397 /* Device not DMA able */
395 if (dev->dma_mask == NULL) 398 if (dev->dma_mask == NULL)
396 return NULL; 399 return NULL;
397 400
398 /* Don't invoke OOM killer */ 401 /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
399 gfp |= __GFP_NORETRY; 402 if (gfp & __GFP_DMA)
403 noretry = 1;
400 404
401#ifdef CONFIG_X86_64 405#ifdef CONFIG_X86_64
402 /* Why <=? Even when the mask is smaller than 4GB it is often 406 /* Why <=? Even when the mask is smaller than 4GB it is often
403 larger than 16MB and in this case we have a chance of 407 larger than 16MB and in this case we have a chance of
404 finding fitting memory in the next higher zone first. If 408 finding fitting memory in the next higher zone first. If
405 not retry with true GFP_DMA. -AK */ 409 not retry with true GFP_DMA. -AK */
406 if (dma_mask <= DMA_32BIT_MASK) 410 if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
407 gfp |= GFP_DMA32; 411 gfp |= GFP_DMA32;
412 if (dma_mask < DMA_32BIT_MASK)
413 noretry = 1;
414 }
408#endif 415#endif
409 416
410 again: 417 again:
411 page = dma_alloc_pages(dev, gfp, get_order(size)); 418 page = dma_alloc_pages(dev,
419 noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
412 if (page == NULL) 420 if (page == NULL)
413 return NULL; 421 return NULL;
414 422