author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-12-02 11:36:24 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-02-18 17:27:49 -0500
commit		e0138c26cdeee8c033256ccd9e07d66db3c998be (patch)
tree		2d354ce89abb54bf9ae36e6ca287e12d84c2ed5b /drivers/gpu/drm
parent		c39d35161e87f1d7c0628af6907ac66a8c77f63f (diff)
nouveau/ttm/PCIe: Use dma_addr if TTM has set it.
If the TTM layer has used the DMA API to set up pages that are
TTM_PAGE_FLAG_DMA32 (see the patch titled "ttm: Utilize the
DMA API for pages that have TTM_PAGE_FLAG_DMA32 set"), let's
use it when programming the GART in the PCIe type cards.

This patch skips doing the pci_map_page (and pci_unmap_page) if
there is a DMA address passed in for that page. If the dma_address
is zero (or DMA_ERROR_CODE), then we continue with our old
behaviour.
[v2: Added a Reviewed-by tag]
Reviewed-by: Thomas Hellstrom <thomas@shipmail.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Ian Campbell <ian.campbell@citrix.com>
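To make the per-page decision concrete, here is a minimal userspace C sketch of the logic this patch adds to nouveau_sgdma_populate(): trust a caller-supplied DMA address when one is present, otherwise map the page ourselves and remember that we own the mapping. Everything prefixed fake_ (and FAKE_DMA_ERROR) is a hypothetical stand-in for the kernel's pci_map_page() and DMA_ERROR_CODE; the real code runs in kernel context and is shown in the diff below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Hypothetical stand-in for the kernel's DMA_ERROR_CODE sentinel
 * (the commit message treats zero the same way). */
#define FAKE_DMA_ERROR ((dma_addr_t)0)

/* Hypothetical stand-in for pci_map_page(): hands out fake bus addresses. */
static dma_addr_t fake_pci_map_page(unsigned idx)
{
	return 0x1000u + ((dma_addr_t)idx << 12);
}

/*
 * Mirror of the patch's populate logic: if the TTM layer already
 * supplied a DMA address for a page, record it and flag that we did
 * NOT map it ourselves (so teardown must not unmap it); otherwise
 * map the page and take ownership of the mapping.
 */
static int populate(dma_addr_t *pages, bool *ttm_alloced,
		    const dma_addr_t *dma_addrs, unsigned num_pages)
{
	unsigned i;

	for (i = 0; i < num_pages; i++) {
		if (dma_addrs[i] != FAKE_DMA_ERROR) {
			pages[i] = dma_addrs[i];
			ttm_alloced[i] = true;	/* TTM owns this mapping */
		} else {
			pages[i] = fake_pci_map_page(i);
			ttm_alloced[i] = false;	/* we own it; unmap later */
		}
	}
	return 0;
}

int main(void)
{
	/* Page 1 arrives with a TTM-provided address; pages 0 and 2 do not. */
	dma_addr_t dma_addrs[3] = { FAKE_DMA_ERROR, 0xabcd000, FAKE_DMA_ERROR };
	dma_addr_t pages[3];
	bool ttm_alloced[3];
	unsigned i;

	populate(pages, ttm_alloced, dma_addrs, 3);
	for (i = 0; i < 3; i++)
		printf("page %u: addr=%#llx ttm_alloced=%d\n",
		       i, (unsigned long long)pages[i], ttm_alloced[i]);
	return 0;
}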
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c	28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index edc140ab4df1..bbdd982cbb3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
+	bool *ttm_alloced;
 	unsigned nr_pages;
 
 	unsigned pte_start;
@@ -35,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 	if (!nvbe->pages)
 		return -ENOMEM;
 
+	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+	if (!nvbe->ttm_alloced)
+		return -ENOMEM;
+
 	nvbe->nr_pages = 0;
 	while (num_pages--) {
-		nvbe->pages[nvbe->nr_pages] =
-			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+			nvbe->pages[nvbe->nr_pages] =
+					dma_addrs[nvbe->nr_pages];
+			nvbe->ttm_alloced[nvbe->nr_pages] = true;
+		} else {
+			nvbe->pages[nvbe->nr_pages] =
+				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
 				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev,
-				nvbe->pages[nvbe->nr_pages])) {
-			be->func->clear(be);
-			return -EFAULT;
+			if (pci_dma_mapping_error(dev->pdev,
+					nvbe->pages[nvbe->nr_pages])) {
+				be->func->clear(be);
+				return -EFAULT;
+			}
 		}
 
 		nvbe->nr_pages++;
@@ -66,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 			be->func->unbind(be);
 
 		while (nvbe->nr_pages--) {
-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+			if (!nvbe->ttm_alloced[nvbe->nr_pages])
+				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 		kfree(nvbe->pages);
+		kfree(nvbe->ttm_alloced);
 		nvbe->pages = NULL;
+		nvbe->ttm_alloced = NULL;
 		nvbe->nr_pages = 0;
 	}
 }
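The teardown side must honour the same ownership flag, which is what the nouveau_sgdma_clear() hunk above enforces. A self-contained sketch of that rule, again with fake_pci_unmap_page() as a hypothetical stand-in for the kernel's pci_unmap_page(): only mappings this backend created are unmapped, while TTM-provided addresses are left for the TTM layer to tear down.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Hypothetical stand-in for pci_unmap_page(). */
static void fake_pci_unmap_page(dma_addr_t addr)
{
	printf("unmapped %#llx\n", (unsigned long long)addr);
}

/*
 * Sketch of the teardown rule the patch adds: skip pci_unmap_page for
 * any page whose DMA address came from the TTM layer, since we never
 * mapped it ourselves.
 */
static void clear(dma_addr_t *pages, bool *ttm_alloced, unsigned nr_pages)
{
	while (nr_pages--) {
		if (!ttm_alloced[nr_pages])
			fake_pci_unmap_page(pages[nr_pages]);
	}
}

int main(void)
{
	dma_addr_t pages[2] = { 0x1000, 0xabcd000 };
	bool ttm_alloced[2] = { false, true };	/* page 1 is TTM-owned */

	clear(pages, ttm_alloced, 2);	/* unmaps only page 0 */
	return 0;
}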