Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
 arch/x86/kernel/pci-dma.c | 33 +++++++++++++++++++++------------
 1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0c37f16b6950..3c43109ba054 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -77,10 +77,14 @@ void __init dma32_reserve_bootmem(void)
 	if (end_pfn <= MAX_DMA32_PFN)
 		return;
 
+	/*
+	 * check aperture_64.c allocate_aperture() for reason about
+	 * using 512M as goal
+	 */
 	align = 64ULL<<20;
 	size = round_up(dma32_bootmem_size, align);
 	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
-				 __pa(MAX_DMA_ADDRESS));
+				 512ULL<<20);
 	if (dma32_bootmem_ptr)
 		dma32_bootmem_size = size;
 	else
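
The third argument to __alloc_bootmem_nopanic() is the allocation goal, i.e. the preferred physical address at which the search starts; the hunk raises it from __pa(MAX_DMA_ADDRESS) (16MB on x86-64) to 512MB, the same goal allocate_aperture() uses. A minimal user-space sketch of the power-of-two rounding the hunk relies on (ROUND_UP reimplements the kernel's round_up() for power-of-two alignments; the 65MB input size is a made-up example):

/* User-space sketch, not kernel code: size/goal arithmetic from the hunk. */
#include <stdio.h>
#include <stdint.h>

/* power-of-two round-up, as the kernel's round_up() does */
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t size  = 65ULL << 20;	/* hypothetical dma32_bootmem_size */
	uint64_t align = 64ULL << 20;	/* 64MB alignment, as in the hunk */
	uint64_t goal  = 512ULL << 20;	/* new goal; was __pa(MAX_DMA_ADDRESS) */

	/* 65MB rounds up to 128MB at 64MB alignment */
	printf("size=%lluMB goal=%lluMB\n",
	       (unsigned long long)(ROUND_UP(size, align) >> 20),
	       (unsigned long long)(goal >> 20));
	return 0;
}
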
@@ -88,7 +92,6 @@ void __init dma32_reserve_bootmem(void)
 }
 static void __init dma32_free_bootmem(void)
 {
-	int node;
 
 	if (end_pfn <= MAX_DMA32_PFN)
 		return;
@@ -96,9 +99,7 @@ static void __init dma32_free_bootmem(void)
 	if (!dma32_bootmem_ptr)
 		return;
 
-	for_each_online_node(node)
-		free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
-				  dma32_bootmem_size);
+	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
 
 	dma32_bootmem_ptr = NULL;
 	dma32_bootmem_size = 0;
@@ -357,7 +358,7 @@ int dma_supported(struct device *dev, u64 mask)
 EXPORT_SYMBOL(dma_supported);
 
 /* Allocate DMA memory on node near device */
-noinline struct page *
+static noinline struct page *
 dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
 {
 	int node;
@@ -378,6 +379,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	unsigned long dma_mask = 0;
 	dma_addr_t bus;
+	int noretry = 0;
 
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -385,30 +387,37 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
 		return memory;
 
-	if (!dev)
+	if (!dev) {
 		dev = &fallback_dev;
+		gfp |= GFP_DMA;
+	}
 	dma_mask = dev->coherent_dma_mask;
 	if (dma_mask == 0)
-		dma_mask = DMA_32BIT_MASK;
+		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
 
 	/* Device not DMA able */
 	if (dev->dma_mask == NULL)
 		return NULL;
 
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
+	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
+	if (gfp & __GFP_DMA)
+		noretry = 1;
 
 #ifdef CONFIG_X86_64
 	/* Why <=? Even when the mask is smaller than 4GB it is often
 	   larger than 16MB and in this case we have a chance of
 	   finding fitting memory in the next higher zone first. If
 	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK)
+	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
 		gfp |= GFP_DMA32;
+		if (dma_mask < DMA_32BIT_MASK)
+			noretry = 1;
+	}
 #endif
 
  again:
-	page = dma_alloc_pages(dev, gfp, get_order(size));
+	page = dma_alloc_pages(dev,
+			noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
 	if (page == NULL)
 		return NULL;
 
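
Taken together, the last two hunks replace the unconditional __GFP_NORETRY with a per-allocation decision: retries stay disabled only where the allocation sits in the scarce 16MB DMA zone, or where the mask is strictly below 4GB and the real fallback is the later retry with true GFP_DMA. A hypothetical user-space model of the resulting flag selection (pick_gfp() and its callers are illustrative, not kernel API; the GFP bits and DMA masks mirror 2.6-era kernel values, with GFP_DMA/GFP_DMA32 written as their single-bit __GFP_* forms):

/* Hypothetical user-space model of the flag selection the patched
 * dma_alloc_coherent() performs; not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define __GFP_DMA	0x01u		/* ZONE_DMA: lower 16MB */
#define __GFP_DMA32	0x04u		/* ZONE_DMA32: lower 4GB */
#define __GFP_NORETRY	0x1000u		/* give up instead of retrying */
#define DMA_24BIT_MASK	0x0000000000ffffffULL
#define DMA_32BIT_MASK	0x00000000ffffffffULL

static unsigned int pick_gfp(uint64_t coherent_dma_mask, unsigned int gfp,
			     int have_dev)
{
	uint64_t dma_mask = coherent_dma_mask;
	int noretry = 0;

	if (!have_dev)		/* fallback device: force the 16MB zone */
		gfp |= __GFP_DMA;
	if (dma_mask == 0)
		dma_mask = (gfp & __GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	/* don't invoke the OOM killer or retry in the 16MB DMA zone */
	if (gfp & __GFP_DMA)
		noretry = 1;

	/* x86-64 path: try ZONE_DMA32 first for masks of 4GB or less; if
	 * the mask is strictly below 4GB the remaining fallback is a retry
	 * with true GFP_DMA, so don't retry hard in ZONE_DMA32 either */
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & __GFP_DMA)) {
		gfp |= __GFP_DMA32;
		if (dma_mask < DMA_32BIT_MASK)
			noretry = 1;
	}
	return noretry ? gfp | __GFP_NORETRY : gfp;
}

int main(void)
{
	printf("4GB mask:  %#x\n", pick_gfp(DMA_32BIT_MASK, 0, 1));
	printf("1GB mask:  %#x\n", pick_gfp(0x3fffffffULL, 0, 1));
	printf("no device: %#x\n", pick_gfp(0, 0, 0));
	return 0;
}

Compiled and run, the three calls print 0x4, 0x1004 and 0x1001: only the plain 4GB-mask case keeps retries enabled, matching the intent of the patch.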