author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-09-24 07:48:37 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-25 05:02:26 -0400
commit	1d990882153f36723f9e8717c4401689e64c7a36 (patch)
tree	e0e09333cf42756d90bf6e8a0e1ae588a852babe /arch/x86/kernel/pci-gart_64.c
parent	ecef533ea68b2fb3baaf459beb2f802a240bdb16 (diff)
x86: restore old GART alloc_coherent behavior
Currently, GART alloc_coherent tries to allocate pages with GFP_DMA32
for a device whose DMA mask is larger than 24 bits but smaller than 32
bits. If GART gets an address that the device can't access, it tries
to map the address to a virtual I/O address that the device can
access.
But Andi pointed out, "The GART is somewhere in the 4GB range so you
cannot use it to map anything < 4GB. Also GART is pretty small."
http://lkml.org/lkml/2008/9/12/43
That is, the GART may have no virtual I/O address space that such a
device can access. The above behavior therefore doesn't work for a
device whose DMA mask is larger than 24 bits but smaller than 32 bits.
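As a back-of-the-envelope illustration (not part of the patch): a
device with, say, a 28-bit mask can reach only the first 256MB of
physical address space, while the GART aperture sits high in the 4GB
range, so any remapped I/O address is still out of reach. The aperture
address below is a made-up example value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A device with a 28-bit DMA mask can only reach the first 256MB. */
	uint64_t dma_mask = (1ULL << 28) - 1;

	/* Hypothetical GART aperture address, high in the 4GB range. */
	uint64_t gart_iova = 0xE0000000ULL;

	/* The GART can only hand out addresses inside its aperture, so
	 * the remapped I/O address is still beyond the device's mask. */
	printf("GART-mapped address reachable: %s\n",
	       gart_iova <= dma_mask ? "yes" : "no");
	return 0;
}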
This patch restores the old GART alloc_coherent behavior (from before
the alloc_coherent rewrite).
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	43
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 7e08e466b8ad..25c94fb96d74 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -487,31 +487,28 @@ static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 		    gfp_t flag)
 {
-	void *vaddr;
 	dma_addr_t paddr;
 	unsigned long align_mask;
-	u64 dma_mask = dma_alloc_coherent_mask(dev, flag);
-
-	vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
-	if (!vaddr)
-		return NULL;
-
-	paddr = virt_to_phys(vaddr);
-	if (is_buffer_dma_capable(dma_mask, paddr, size)) {
-		*dma_addr = paddr;
-		return vaddr;
-	}
-
-	align_mask = (1UL << get_order(size)) - 1;
-
-	*dma_addr = dma_map_area(dev, paddr, size, DMA_BIDIRECTIONAL,
-				 align_mask);
-	flush_gart();
-
-	if (*dma_addr != bad_dma_address)
-		return vaddr;
-
-	free_pages((unsigned long)vaddr, get_order(size));
+	struct page *page;
+
+	if (force_iommu && !(flag & GFP_DMA)) {
+		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
+		if (!page)
+			return NULL;
+
+		align_mask = (1UL << get_order(size)) - 1;
+		paddr = dma_map_area(dev, page_to_phys(page), size,
+				     DMA_BIDIRECTIONAL, align_mask);
+
+		flush_gart();
+		if (paddr != bad_dma_address) {
+			*dma_addr = paddr;
+			return page_address(page);
+		}
+		__free_pages(page, get_order(size));
+	} else
+		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
 
 	return NULL;
 }
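Note on the restored control flow: the else branch defers to
dma_generic_alloc_coherent(), which (roughly, in kernels of this era)
allocates normally and retries from a more restrictive zone when the
returned address doesn't fit the device's coherent mask. A minimal
userspace model of that fallback idea follows; the zone names and
addresses are stand-ins for illustration, not real kernel APIs:

#include <stdio.h>
#include <stdint.h>

/* Stand-in zone ceilings modelling ZONE_NORMAL/ZONE_DMA32/ZONE_DMA. */
static const uint64_t zone_addr[] = {
	0x200000000ULL,	/* "normal": an address above 4GB */
	0xFFFFF000ULL,	/* "DMA32": just under 4GB */
	0x00FFF000ULL,	/* "DMA": just under 16MB */
};

/* Model of the generic fallback: take the first zone whose address
 * fits the device mask, as if retrying the allocation with a stricter
 * GFP zone each time the capability check fails. */
static uint64_t alloc_coherent_model(uint64_t dma_mask)
{
	for (int i = 0; i < 3; i++)
		if (zone_addr[i] <= dma_mask)
			return zone_addr[i];
	return 0;	/* no zone satisfies the mask */
}

int main(void)
{
	uint64_t mask28 = (1ULL << 28) - 1;	/* 28-bit device */

	/* Only the 16MB "DMA" zone satisfies a 28-bit mask. */
	printf("addr = %#llx\n",
	       (unsigned long long)alloc_coherent_model(mask28));
	return 0;
}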