author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2009-11-15 07:19:53 -0500
committer  Ingo Molnar <mingo@elte.hu>                       2009-11-17 01:53:21 -0500
commit     8fd524b355daef0945692227e726fb444cebcd4f (patch)
tree       d9c067e6e2693d46b70010a4ed2910205a59c92d /arch/x86/kernel
parent     42109197eb7c01080eea6d9cd48ca23cbc3c566c (diff)
x86: Kill bad_dma_address variable
This kills the bad_dma_address variable, the old mechanism that let
IOMMU drivers make dma_mapping_error() work in an IOMMU-specific way.

The bad_dma_address variable was introduced so IOMMU drivers could
make dma_mapping_error() work in an IOMMU-specific way. However, it
can't handle systems that use both swiotlb and a hardware IOMMU, so
we introduced dma_map_ops->mapping_error to solve that case.

Intel VT-d, GART, and swiotlb already use
dma_map_ops->mapping_error. Calgary, AMD IOMMU, and nommu use
zero for an error dma address. This adds DMA_ERROR_CODE and
converts them to use it (as SPARC and POWER do); see the sketch
after the tags below.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: muli@il.ibm.com
Cc: joerg.roedel@amd.com
LKML-Reference: <1258287594-8777-3-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
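
For context, the dispatch the changelog describes composes roughly like
this on x86 -- a minimal sketch modeled on the arch/x86/include/asm/dma-mapping.h
of this era, not the verbatim header. IOMMUs with a private error encoding
(Intel VT-d, GART, swiotlb) install ->mapping_error; everyone else is
checked against DMA_ERROR_CODE, which x86 defines as 0:

/*
 * Sketch (not the verbatim header): how dma_mapping_error() picks
 * between the two error-reporting mechanisms after this series.
 */
#define DMA_ERROR_CODE  0

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        /* IOMMU with its own error encoding: defer to its hook */
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        /* everyone else: the shared error constant */
        return (dma_addr == DMA_ERROR_CODE);
}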
Diffstat (limited to 'arch/x86/kernel')

 arch/x86/kernel/amd_iommu.c      | 21 ++++++++++-----------
 arch/x86/kernel/pci-calgary_64.c | 22 ++++++++++------------
 arch/x86/kernel/pci-dma.c        |  3 ---
 arch/x86/kernel/pci-nommu.c      |  2 +-
 4 files changed, 21 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 66237fde758f..093bd526c949 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -928,7 +928,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 	}
 
 	if (unlikely(address == -1))
-		address = bad_dma_address;
+		address = DMA_ERROR_CODE;
 
 	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
 
@@ -1544,7 +1544,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 
 	pte = dma_ops_get_pte(dom, address);
 	if (!pte)
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
 
@@ -1625,7 +1625,7 @@ static dma_addr_t __map_single(struct device *dev,
 retry:
 	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
 					  dma_mask);
-	if (unlikely(address == bad_dma_address)) {
+	if (unlikely(address == DMA_ERROR_CODE)) {
 		/*
 		 * setting next_address here will let the address
 		 * allocator only scan the new allocated range in the
@@ -1646,7 +1646,7 @@ retry:
 	start = address;
 	for (i = 0; i < pages; ++i) {
 		ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
-		if (ret == bad_dma_address)
+		if (ret == DMA_ERROR_CODE)
 			goto out_unmap;
 
 		paddr += PAGE_SIZE;
@@ -1674,7 +1674,7 @@ out_unmap:
 
 	dma_ops_free_addresses(dma_dom, address, pages);
 
-	return bad_dma_address;
+	return DMA_ERROR_CODE;
 }
 
 /*
@@ -1690,7 +1690,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == bad_dma_address) ||
+	if ((dma_addr == DMA_ERROR_CODE) ||
 	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
@@ -1732,7 +1732,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	INC_STATS_COUNTER(cnt_map_single);
 
 	if (!check_device(dev))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	dma_mask = *dev->dma_mask;
 
@@ -1743,12 +1743,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 		return (dma_addr_t)paddr;
 
 	if (!dma_ops_domain(domain))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	spin_lock_irqsave(&domain->lock, flags);
 	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
 			    dma_mask);
-	if (addr == bad_dma_address)
+	if (addr == DMA_ERROR_CODE)
 		goto out;
 
 	iommu_completion_wait(iommu);
@@ -1957,7 +1957,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
 				 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
-	if (*dma_addr == bad_dma_address) {
+	if (*dma_addr == DMA_ERROR_CODE) {
 		spin_unlock_irqrestore(&domain->lock, flags);
 		goto out_free;
 	}
@@ -2110,7 +2110,6 @@ int __init amd_iommu_init_dma_ops(void)
 	prealloc_protection_domains();
 
 	iommu_detected = 1;
-	bad_dma_address = 0;
 	swiotlb = 0;
 #ifdef CONFIG_GART_IOMMU
 	gart_iommu_aperture_disabled = 1;
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index c84ad037f586..af9f436096a2 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -245,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 			if (panic_on_overflow)
 				panic("Calgary: fix the allocator.\n");
 			else
-				return bad_dma_address;
+				return DMA_ERROR_CODE;
 		}
 	}
 
@@ -261,11 +261,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      void *vaddr, unsigned int npages, int direction)
 {
 	unsigned long entry;
-	dma_addr_t ret = bad_dma_address;
+	dma_addr_t ret = DMA_ERROR_CODE;
 
 	entry = iommu_range_alloc(dev, tbl, npages);
 
-	if (unlikely(entry == bad_dma_address))
+	if (unlikely(entry == DMA_ERROR_CODE))
 		goto error;
 
 	/* set the return dma address */
@@ -280,7 +280,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 error:
 	printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
 	       "iommu %p\n", npages, tbl);
-	return bad_dma_address;
+	return DMA_ERROR_CODE;
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -291,8 +291,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 
 	/* were we called with bad_dma_address? */
-	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
-	if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
+	badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+	if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
 		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
 		       "address 0x%Lx\n", dma_addr);
 		return;
@@ -374,7 +374,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
 
 		entry = iommu_range_alloc(dev, tbl, npages);
-		if (entry == bad_dma_address) {
+		if (entry == DMA_ERROR_CODE) {
 			/* makes sure unmap knows to stop */
 			s->dma_length = 0;
 			goto error;
@@ -392,7 +392,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 error:
 	calgary_unmap_sg(dev, sg, nelems, dir, NULL);
 	for_each_sg(sg, s, nelems, i) {
-		sg->dma_address = bad_dma_address;
+		sg->dma_address = DMA_ERROR_CODE;
 		sg->dma_length = 0;
 	}
 	return 0;
@@ -447,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 
 	/* set up tces to cover the allocated range */
 	mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
-	if (mapping == bad_dma_address)
+	if (mapping == DMA_ERROR_CODE)
 		goto free;
 	*dma_handle = mapping;
 	return ret;
@@ -728,7 +728,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
 	struct iommu_table *tbl = pci_iommu(dev->bus);
 
 	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
-	iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
+	iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
 
 	/* avoid the BIOS/VGA first 640KB-1MB region */
 	/* for CalIOC2 - avoid the entire first MB */
@@ -1359,8 +1359,6 @@ static int __init calgary_iommu_init(void)
 		return ret;
 	}
 
-	bad_dma_address = 0x0;
-
 	return 0;
 }
 
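Using zero as the error cookie is only safe here because Calgary never
allocates the bottom of its IO space: calgary_reserve_regions() pins
EMERGENCY_PAGES starting at DMA_ERROR_CODE, so iommu_range_alloc() can
never return 0 for a successful mapping. A minimal sketch of that
invariant, with assumed names -- reserve_error_window() is hypothetical,
and the real driver goes through iommu_range_reserve() on the table's
it_map bitmap:

#include <linux/bitops.h>

#define EMERGENCY_PAGES 32	/* value used by the Calgary driver */

/*
 * Hypothetical helper: permanently mark the IO pages overlapping the
 * window [0, EMERGENCY_PAGES * PAGE_SIZE) as allocated, so no valid
 * mapping can ever equal DMA_ERROR_CODE (0 on x86).
 */
static void reserve_error_window(unsigned long *it_map)
{
        unsigned long pfn;

        for (pfn = 0; pfn < EMERGENCY_PAGES; pfn++)
                set_bit(pfn, it_map);
}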
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index bf621b9ee26e..afcc58b69c7c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -43,9 +43,6 @@ int iommu_detected __read_mostly = 0;
  */
 int iommu_pass_through __read_mostly;
 
-dma_addr_t bad_dma_address __read_mostly = 0;
-EXPORT_SYMBOL(bad_dma_address);
-
 /* Dummy device used for NULL arguments (normally ISA). */
 struct device x86_dma_fallback_dev = {
 	.init_name = "fallback device",
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 875e3822ae61..22be12b60a8f 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t bus = page_to_phys(page) + offset;
 	WARN_ON(size == 0);
 	if (!check_addr("map_single", dev, bus, size))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 	flush_write_buffers();
 	return bus;
 }
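
With bad_dma_address gone, drivers keep detecting failures the portable
way: call dma_mapping_error() on the returned handle rather than
comparing it to any constant. A hypothetical call site for illustration
(example_map() and its parameters are made up; the DMA API calls are the
stock ones):

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver fragment: map one page for device-to-memory DMA
 * and check the result portably, whatever ->mapping_error hook the
 * platform installed.
 */
static int example_map(struct device *dev, struct page *page, size_t size)
{
        dma_addr_t handle = dma_map_page(dev, page, 0, size, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM;		/* no magic constant compared here */

        /* ... hand 'handle' to the device, wait for completion ... */

        dma_unmap_page(dev, handle, size, DMA_FROM_DEVICE);
        return 0;
}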