path: root/arch/x86/kernel/pci-dma.c
author    Miquel van Smoorenburg <mikevs@xs4all.net>    2008-06-05 12:14:44 -0400
committer Ingo Molnar <mingo@elte.hu>                   2008-06-10 06:22:18 -0400
commit    b7f09ae583c49d28b2796d2fa5893dcf822e3a10 (patch)
tree      149ba4d9df0eec4c10689d51b33d1e200a512328 /arch/x86/kernel/pci-dma.c
parent    f529626a86d61897862aa1bbbb4537773209238e (diff)
x86, pci-dma.c: don't always add __GFP_NORETRY to gfp
Currently arch/x86/kernel/pci-dma.c always adds __GFP_NORETRY to the
allocation flags, because it wants to be reasonably sure not to
deadlock when calling alloc_pages(). But really that should only be
done in two cases:

- when allocating memory in the lower 16 MB DMA zone. If there's no
  free memory there, waiting or OOM killing is of no use

- when optimistically trying an allocation in the DMA32 zone when
  dma_mask < DMA_32BIT_MASK, hoping that the allocation happens to
  fall within the limits of the dma_mask

Also, blindly adding __GFP_NORETRY to the gfp variable might not be a
good idea, since we then also use it when calling
dma_ops->alloc_coherent(). Clearing it might not be a good idea
either: dma_alloc_coherent()'s caller might have set it on purpose.
The gfp variable should not be clobbered.

[ mingo@elte.hu: converted to delta patch on top of previous version. ]

Signed-off-by: Miquel van Smoorenburg <miquels@cistron.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
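The "don't clobber gfp" point can be seen in a small standalone model
of the patch's flag handling. This is a userspace sketch, not kernel
code: the constants are illustrative stand-ins for the real gfp bits,
and the CONFIG_X86_64 guard around case 2 is reduced to a comment. The
caller's gfp survives untouched; __GFP_NORETRY is OR-ed in only at the
one call site that needs it.

#include <stdio.h>

/* Stand-in values, NOT the real kernel flag bits */
#define __GFP_DMA      0x01u
#define __GFP_NORETRY  0x1000u
#define DMA_32BIT_MASK 0xffffffffULL

static unsigned int alloc_flags(unsigned int gfp, unsigned long long dma_mask)
{
	int noretry = 0;

	/* Case 1: lower 16MB DMA zone -- waiting or OOM killing won't help */
	if (gfp & __GFP_DMA)
		noretry = 1;

	/* Case 2: optimistic DMA32 attempt for a mask below 4GB
	 * (the kernel additionally guards this with CONFIG_X86_64) */
	if (dma_mask < DMA_32BIT_MASK && !(gfp & __GFP_DMA))
		noretry = 1;

	/* gfp itself is left intact for any later dma_ops call */
	return noretry ? gfp | __GFP_NORETRY : gfp;
}

int main(void)
{
	printf("%#x\n", alloc_flags(__GFP_DMA, DMA_32BIT_MASK)); /* NORETRY set */
	printf("%#x\n", alloc_flags(0, DMA_32BIT_MASK));         /* unchanged */
	return 0;
}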
Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r--  arch/x86/kernel/pci-dma.c | 13 +++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 069e843f0b93..dc00a1331ace 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -378,6 +378,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	unsigned long dma_mask = 0;
 	dma_addr_t bus;
+	int noretry = 0;
 
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -397,19 +398,25 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (dev->dma_mask == NULL)
 		return NULL;
 
+	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
+	if (gfp & __GFP_DMA)
+		noretry = 1;
+
 #ifdef CONFIG_X86_64
 	/* Why <=? Even when the mask is smaller than 4GB it is often
 	   larger than 16MB and in this case we have a chance of
 	   finding fitting memory in the next higher zone first. If
 	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
 		gfp |= GFP_DMA32;
+		if (dma_mask < DMA_32BIT_MASK)
+			noretry = 1;
+	}
 #endif
 
  again:
-	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
 	page = dma_alloc_pages(dev,
-			(gfp & GFP_DMA) ? gfp | __GFP_NORETRY : gfp, get_order(size));
+			noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
 	if (page == NULL)
 		return NULL;
 
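For context, this path is reached through the ordinary coherent-DMA
API. A minimal driver-side sketch follows; dma_alloc_coherent() and
dma_free_coherent() are the real kernel interfaces, while the function
names and the 4096-byte buffer size are hypothetical. The GFP_KERNEL
passed here is the gfp argument that, after this patch, is no longer
clobbered with __GFP_NORETRY before reaching dma_ops->alloc_coherent().

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static void *buf;
static dma_addr_t buf_bus;

/* Hypothetical setup helper: dev would come from the driver's probe */
static int my_dev_setup_ring(struct device *dev)
{
	buf = dma_alloc_coherent(dev, 4096, &buf_bus, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	return 0;
}

static void my_dev_free_ring(struct device *dev)
{
	dma_free_coherent(dev, 4096, buf, buf_bus);
}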