Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
 -rw-r--r--  arch/x86/kernel/pci-dma.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index c5ef1af8e79d..8467ec2320f1 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -7,6 +7,7 @@
 #include <asm/dma.h>
 #include <asm/gart.h>
 #include <asm/calgary.h>
+#include <asm/amd_iommu.h>
 
 int forbid_dac __read_mostly;
 EXPORT_SYMBOL(forbid_dac);
@@ -74,13 +75,17 @@ early_param("dma32_size", parse_dma32_size_opt);
 void __init dma32_reserve_bootmem(void)
 {
 	unsigned long size, align;
-	if (end_pfn <= MAX_DMA32_PFN)
+	if (max_pfn <= MAX_DMA32_PFN)
 		return;
 
+	/*
+	 * check aperture_64.c allocate_aperture() for reason about
+	 * using 512M as goal
+	 */
 	align = 64ULL<<20;
 	size = round_up(dma32_bootmem_size, align);
 	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
-				 __pa(MAX_DMA_ADDRESS));
+				 512ULL<<20);
 	if (dma32_bootmem_ptr)
 		dma32_bootmem_size = size;
 	else
@@ -88,17 +93,14 @@ void __init dma32_reserve_bootmem(void)
 }
 static void __init dma32_free_bootmem(void)
 {
-	int node;
 
-	if (end_pfn <= MAX_DMA32_PFN)
+	if (max_pfn <= MAX_DMA32_PFN)
 		return;
 
 	if (!dma32_bootmem_ptr)
 		return;
 
-	for_each_online_node(node)
-		free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
-				  dma32_bootmem_size);
+	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
 
 	dma32_bootmem_ptr = NULL;
 	dma32_bootmem_size = 0;
@@ -122,6 +124,8 @@ void __init pci_iommu_alloc(void)
 
 	detect_intel_iommu();
 
+	amd_iommu_detect();
+
 #ifdef CONFIG_SWIOTLB
 	pci_swiotlb_init();
 #endif
@@ -357,7 +361,7 @@ int dma_supported(struct device *dev, u64 mask)
 EXPORT_SYMBOL(dma_supported);
 
 /* Allocate DMA memory on node near device */
-noinline struct page *
+static noinline struct page *
 dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
 {
 	int node;
@@ -378,6 +382,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	unsigned long dma_mask = 0;
 	dma_addr_t bus;
+	int noretry = 0;
 
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -397,20 +402,25 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (dev->dma_mask == NULL)
 		return NULL;
 
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
+	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
+	if (gfp & __GFP_DMA)
+		noretry = 1;
 
 #ifdef CONFIG_X86_64
 	/* Why <=? Even when the mask is smaller than 4GB it is often
 	   larger than 16MB and in this case we have a chance of
 	   finding fitting memory in the next higher zone first. If
 	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
 		gfp |= GFP_DMA32;
+		if (dma_mask < DMA_32BIT_MASK)
+			noretry = 1;
+	}
 #endif
 
  again:
-	page = dma_alloc_pages(dev, gfp, get_order(size));
+	page = dma_alloc_pages(dev,
+		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
 	if (page == NULL)
 		return NULL;
 
@@ -496,6 +506,8 @@ static int __init pci_iommu_init(void)
 
 	intel_iommu_init();
 
+	amd_iommu_init();
+
 #ifdef CONFIG_GART_IOMMU
 	gart_iommu_init();
 #endif
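
Note on the dma_alloc_coherent() hunks above: __GFP_NORETRY used to be set
unconditionally, so any coherent allocation would fail rather than retry or
invoke the OOM killer. After this change it is applied per allocation, only
when the request is already confined to a constrained zone. A minimal
userspace sketch of that decision, assuming simplified stand-ins
(GFP_DMA_FLAG and should_noretry() are hypothetical, not kernel APIs):

	#include <stdint.h>

	#define GFP_DMA_FLAG   0x01u          /* stand-in for __GFP_DMA */
	#define DMA_32BIT_MASK 0xffffffffULL  /* 4GB - 1, as in the kernel */

	/* Nonzero when the allocation should neither retry nor invoke the
	 * OOM killer: the caller asked for the 16MB GFP_DMA zone, or the
	 * device mask is below 4GB so the GFP_DMA32 fallback is already
	 * the zone of last resort. Mirrors the x86-64 noretry logic in
	 * the diff. */
	static int should_noretry(uint64_t dma_mask, unsigned int gfp)
	{
		if (gfp & GFP_DMA_FLAG)
			return 1;
		if (dma_mask < DMA_32BIT_MASK)
			return 1;
		return 0;
	}

The allocation site then reads dma_alloc_pages(dev, noretry ? gfp |
__GFP_NORETRY : gfp, get_order(size)), so an unconstrained allocation may
still retry and fall back to other zones instead of failing early.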