author     Dave Airlie <airlied@redhat.com>  2011-02-22 21:06:39 -0500
committer  Dave Airlie <airlied@redhat.com>  2011-02-22 21:06:39 -0500
commit     de1e7cd63a8ec26a3bd3740708cfd72dd76509e2 (patch)
tree       52bc82a71f34e92895d22821543a2be011834505 /drivers/gpu/drm/ttm
parent     7811bddb6654337fd85837ef14c1a96a0c264745 (diff)
parent     5a893fc28f0393adb7c885a871b8c59e623fd528 (diff)
Merge branch 'stable/ttm.pci-api.v5' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen into drm-next
* 'stable/ttm.pci-api.v5' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
ttm: Include the 'struct dev' when using the DMA API.
nouveau/ttm/PCIe: Use dma_addr if TTM has set it.
radeon/ttm/PCIe: Use dma_addr if TTM has set it.
ttm: Expand (*populate) to support an array of DMA addresses.
ttm: Utilize the DMA API for pages that have TTM_PAGE_FLAG_DMA32 set.
ttm: Introduce a placeholder for DMA (bus) addresses.
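For orientation, the API surface this series widens can be summarized as a sketch reconstructed from the diff below. The declaration layout and the struct ttm_backend_func context are inferred from the TTM headers of this era rather than shown in this diff, so treat them as illustrative:

	#include <linux/dma-mapping.h>	/* dma_addr_t, struct device */

	/* Allocation now optionally fills a per-page array of DMA (bus) addresses. */
	int ttm_get_pages(struct list_head *pages, int flags,
			  enum ttm_caching_state cstate, unsigned count,
			  dma_addr_t *dma_address, struct device *dev);

	/*
	 * The matching free path takes the same array and device so pages obtained
	 * from the DMA API can be returned with dma_free_coherent().
	 */
	void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
			   enum ttm_caching_state cstate, dma_addr_t *dma_address,
			   struct device *dev);

	/* Backend drivers see the addresses through the expanded (*populate) hook. */
	struct ttm_backend_func {
		int (*populate)(struct ttm_backend *backend,
				unsigned long num_pages, struct page **pages,
				struct page *dummy_read_page,
				dma_addr_t *dma_addrs);
		/* ...remaining callbacks unchanged... */
	};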
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c   3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c   35
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c           12
3 files changed, 40 insertions, 10 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index f999e36f30b..1c4a72f681c 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -47,7 +47,8 @@ struct ttm_agp_backend {
 
 static int ttm_agp_populate(struct ttm_backend *backend,
 			    unsigned long num_pages, struct page **pages,
-			    struct page *dummy_read_page)
+			    struct page *dummy_read_page,
+			    dma_addr_t *dma_addrs)
 {
 	struct ttm_agp_backend *agp_be =
 	    container_of(backend, struct ttm_agp_backend, backend);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b1e02fffd3c..35849dbf3ab 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -38,6 +38,7 @@
 #include <linux/mm.h>
 #include <linux/seq_file.h> /* for seq_printf */
 #include <linux/slab.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/atomic.h>
 
@@ -662,7 +663,8 @@ out:
  * cached pages.
  */
 int ttm_get_pages(struct list_head *pages, int flags,
-		  enum ttm_caching_state cstate, unsigned count)
+		  enum ttm_caching_state cstate, unsigned count,
+		  dma_addr_t *dma_address, struct device *dev)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p = NULL;
@@ -681,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		gfp_flags |= GFP_HIGHUSER;
 
 	for (r = 0; r < count; ++r) {
-		p = alloc_page(gfp_flags);
+		if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+			void *addr;
+			addr = dma_alloc_coherent(dev, PAGE_SIZE,
+						  &dma_address[r],
+						  gfp_flags);
+			if (addr == NULL)
+				return -ENOMEM;
+			p = virt_to_page(addr);
+		} else
+			p = alloc_page(gfp_flags);
 		if (!p) {
 
 			printk(KERN_ERR TTM_PFX
 			       "Unable to allocate page.");
 			return -ENOMEM;
 		}
-
 		list_add(&p->lru, pages);
 	}
 	return 0;
@@ -720,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 			printk(KERN_ERR TTM_PFX
 			       "Failed to allocate extra pages "
 			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate);
+			ttm_put_pages(pages, 0, flags, cstate, NULL, NULL);
 			return r;
 		}
 	}
@@ -731,17 +741,30 @@ int ttm_get_pages(struct list_head *pages, int flags,
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-		   enum ttm_caching_state cstate)
+		   enum ttm_caching_state cstate, dma_addr_t *dma_address,
+		   struct device *dev)
 {
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p, *tmp;
+	unsigned r;
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
 
+		r = page_count-1;
 		list_for_each_entry_safe(p, tmp, pages, lru) {
-			__free_page(p);
+			if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+				void *addr = page_address(p);
+				WARN_ON(!addr || !dma_address[r]);
+				if (addr)
+					dma_free_coherent(dev, PAGE_SIZE,
+							  addr,
+							  dma_address[r]);
+				dma_address[r] = 0;
+			} else
+				__free_page(p);
+			r--;
 		}
 		/* Make the pages list empty */
 		INIT_LIST_HEAD(pages);
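A minimal caller sketch (hypothetical, not part of this merge) of how the new parameters pair up per the signatures above: the caller supplies a zero-initialized dma_addr_t array of at least count entries together with the allocating device, and later passes the same array and device back so coherent pages can be released correctly. The example_* helper names are invented for illustration:

	/* Hypothetical helpers, for illustration only. */
	static int example_alloc_dma32_pages(struct device *dev,
					     struct list_head *pages,
					     dma_addr_t *dma_address,
					     unsigned count)
	{
		/* On success, dma_address[0..count-1] may hold bus addresses. */
		return ttm_get_pages(pages, TTM_PAGE_FLAG_DMA32, tt_cached, count,
				     dma_address, dev);
	}

	static void example_free_dma32_pages(struct device *dev,
					     struct list_head *pages,
					     dma_addr_t *dma_address,
					     unsigned count)
	{
		/* The same array and device must be handed back on the free side. */
		ttm_put_pages(pages, count, TTM_PAGE_FLAG_DMA32, tt_cached,
			      dma_address, dev);
	}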
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index af789dc869b..0f8fc9ff0c5 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -49,12 +49,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
 	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+	ttm->dma_address = drm_calloc_large(ttm->num_pages,
+					    sizeof(*ttm->dma_address));
 }
 
 static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 {
 	drm_free_large(ttm->pages);
 	ttm->pages = NULL;
+	drm_free_large(ttm->dma_address);
+	ttm->dma_address = NULL;
 }
 
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
@@ -105,7 +109,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 
 	INIT_LIST_HEAD(&h);
 
-	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+			    &ttm->dma_address[index], ttm->be->bdev->dev);
 
 	if (ret != 0)
 		return NULL;
@@ -164,7 +169,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	be->func->populate(be, ttm->num_pages, ttm->pages,
-			   ttm->dummy_read_page);
+			   ttm->dummy_read_page, ttm->dma_address);
 	ttm->state = tt_unbound;
 	return 0;
 }
@@ -298,7 +303,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 			count++;
 		}
 	}
-	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
+	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+		      ttm->dma_address, ttm->be->bdev->dev);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
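On the driver side (the nouveau and radeon commits in this merge, which fall outside the drivers/gpu/drm/ttm diffstat shown here), a backend's (*populate) hook now receives the DMA address array that ttm_tt.c passes in. The following is a hypothetical sketch, not code from this merge: all example_* names are invented, and the zero-check is an assumption based on the array being allocated with drm_calloc_large() and cleared on free in the hunks above:

	struct example_backend {
		struct ttm_backend backend;	/* embedded TTM backend */
		struct pci_dev *pdev;
		dma_addr_t *bus_addrs;		/* one entry per bound page */
	};

	static int example_backend_populate(struct ttm_backend *backend,
					    unsigned long num_pages,
					    struct page **pages,
					    struct page *dummy_read_page,
					    dma_addr_t *dma_addrs)
	{
		struct example_backend *ebe =
			container_of(backend, struct example_backend, backend);
		unsigned long i;

		for (i = 0; i < num_pages; i++) {
			if (dma_addrs[i]) {
				/* TTM already allocated this page via the DMA API. */
				ebe->bus_addrs[i] = dma_addrs[i];
			} else {
				/* Otherwise map the page for the device ourselves. */
				ebe->bus_addrs[i] = pci_map_page(ebe->pdev, pages[i],
								 0, PAGE_SIZE,
								 PCI_DMA_BIDIRECTIONAL);
			}
		}
		return 0;
	}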