path: root/drivers/gpu/drm/ttm
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-11-29 14:03:30 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-01-27 16:03:09 -0500
commit	69a07f0b117a40fcc1a479358d8e1f41793617f2 (patch)
tree	fc3b827b9a9c7898b35d5cffd5995e56805cc4d8 /drivers/gpu/drm/ttm
parent	f9820a46dd7888b05a36e81166fb1abcc47dcc3f (diff)
ttm: Utilize the DMA API for pages that have TTM_PAGE_FLAG_DMA32 set.
For pages that have the TTM_PAGE_FLAG_DMA32 flag set we use the DMA API. We save the bus address in our array, which we use to program the GART (see "radeon/ttm/PCIe: Use dma_addr if TTM has set it." and "nouveau/ttm/PCIe: Use dma_addr if TTM has set it.").

The reason behind using the DMA API is that under Xen we would otherwise end up programming the GART with the bounce buffer (SWIOTLB) DMA address instead of the physical DMA address of the TTM page, because alloc_page with GFP_DMA32 does not allocate pages under the 4GB mark when running under the Xen hypervisor. On bare metal this simply means the DMA API call happens earlier, at allocation time, instead of when we program the GART.

For details please refer to: https://lkml.org/lkml/2011/1/7/251

[v2: Fixed indentation, revised desc, added Reviewed-by]
Reviewed-by: Thomas Hellstrom <thomas@shipmail.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Ian Campbell <ian.campbell@citrix.com>
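The consumer side referred to above (the radeon and nouveau follow-up patches) checks whether TTM has already filled in a bus address for a page and only maps the page itself when it has not. The snippet below is a minimal sketch of that pattern, not the actual radeon or nouveau code: example_gart_bind(), example_gart_write_entry() and struct example_device are hypothetical names invented for illustration, and the GART entry format is made up; only dma_addr_t, pci_map_page(), pci_dma_mapping_error() and PCI_DMA_BIDIRECTIONAL are real kernel interfaces.

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/io.h>

/* Hypothetical device type, used only for this sketch. */
struct example_device {
	struct pci_dev *pdev;
	u32 __iomem *gart_table;	/* one 32-bit entry per GART page */
};

/* Hypothetical helper: the real GART entry format is device specific. */
static void example_gart_write_entry(struct example_device *dev,
				     unsigned idx, dma_addr_t bus_addr)
{
	iowrite32(lower_32_bits(bus_addr) | 0x1 /* valid bit */,
		  dev->gart_table + idx);
}

static int example_gart_bind(struct example_device *dev, unsigned offset,
			     int npages, struct page **pagelist,
			     dma_addr_t *dma_addr)
{
	int i;

	for (i = 0; i < npages; i++) {
		dma_addr_t bus_addr;

		if (dma_addr && dma_addr[i]) {
			/*
			 * TTM allocated this page with dma_alloc_coherent(),
			 * so dma_addr[i] already holds a usable bus address
			 * (under Xen, the machine address rather than a
			 * SWIOTLB bounce buffer).
			 */
			bus_addr = dma_addr[i];
		} else {
			/* Page came from alloc_page(); map it here instead. */
			bus_addr = pci_map_page(dev->pdev, pagelist[i], 0,
						PAGE_SIZE,
						PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev, bus_addr))
				return -ENOMEM;
		}
		example_gart_write_entry(dev, offset + i, bus_addr);
	}
	return 0;
}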
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	26
1 file changed, 23 insertions, 3 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 9d9d92945f8..737a2a2e46a 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -683,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		gfp_flags |= GFP_HIGHUSER;
 
 	for (r = 0; r < count; ++r) {
-		p = alloc_page(gfp_flags);
+		if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+			void *addr;
+			addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+						  &dma_address[r],
+						  gfp_flags);
+			if (addr == NULL)
+				return -ENOMEM;
+			p = virt_to_page(addr);
+		} else
+			p = alloc_page(gfp_flags);
 		if (!p) {
 
 			printk(KERN_ERR TTM_PFX
 			       "Unable to allocate page.");
 			return -ENOMEM;
 		}
-
 		list_add(&p->lru, pages);
 	}
 	return 0;
@@ -738,12 +746,24 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p, *tmp;
+	unsigned r;
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
 
+		r = page_count-1;
 		list_for_each_entry_safe(p, tmp, pages, lru) {
-			__free_page(p);
+			if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+				void *addr = page_address(p);
+				WARN_ON(!addr || !dma_address[r]);
+				if (addr)
+					dma_free_coherent(NULL, PAGE_SIZE,
+							  addr,
+							  dma_address[r]);
+				dma_address[r] = 0;
+			} else
+				__free_page(p);
+			r--;
 		}
 		/* Make the pages list empty */
 		INIT_LIST_HEAD(pages);