author     Ben Skeggs <bskeggs@redhat.com>  2011-08-30 00:30:11 -0400
committer  Ben Skeggs <bskeggs@redhat.com>  2011-09-20 02:12:27 -0400
commit     a0d9a8feb928465f3cb525a19e5fafd06ef66ced
tree       8c9cbb022ccb4364cf7c754311c070954d8584bc /drivers/gpu/drm
parent     a14845121c1e9cfe302d23ca4ffcfc62cf8e1033
drm/nouveau: remove allocations from gart populate() hook
Since some somewhat questionable changes a while back, TTM provides a
completely empty array of dma_addr_t entries that stays around for the
entire lifetime of the TTM object.
Let's use this array, *always*, rather than wasting yet more memory on
another array whose purpose is identical, as well as yet another bool array
of the same size saying *which* of the previous two arrays to use...
This change will also solve the high-order allocation failures some
people have seen while using nouveau.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
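
For scale, a worked example of where those high-order failures come from
(the figures below are illustrative, not from the commit): the old
populate() hook did kmalloc(sizeof(dma_addr_t) * num_pages) per object,
and kmalloc() needs physically contiguous memory.

	/* Stand-alone userspace C, illustrative sizes only. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long bo_size   = 128UL << 20;         /* 128 MiB object */
		unsigned long page_size = 4096;                /* 4 KiB pages */
		unsigned long num_pages = bo_size / page_size; /* 32768 pages */
		unsigned long bytes     = num_pages * 8;       /* 64-bit dma_addr_t */

		/* 256 KiB = 64 contiguous pages = an order-6 allocation; on a
		 * fragmented system requests that large routinely fail, which
		 * is exactly what reusing TTM's existing array avoids (the
		 * per-page bool array was another 32 KiB, order-3, on top). */
		printf("per-object dma_addr_t array: %lu KiB\n", bytes >> 10);
		return 0;
	}
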
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 66 +++++++++++---------------------------------------------------------
1 file changed, 21 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 2706cb3d871a..b75258a9fe44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,8 +12,8 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
-	bool *ttm_alloced;
 	unsigned nr_pages;
+	bool unmap_pages;
 
 	u64 offset;
 	bool bound;
@@ -26,43 +26,28 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_device *dev = nvbe->dev;
+	int i;
 
 	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
 
-	if (nvbe->pages)
-		return -EINVAL;
-
-	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
-	if (!nvbe->pages)
-		return -ENOMEM;
+	nvbe->pages = dma_addrs;
+	nvbe->nr_pages = num_pages;
+	nvbe->unmap_pages = true;
 
-	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
-	if (!nvbe->ttm_alloced) {
-		kfree(nvbe->pages);
-		nvbe->pages = NULL;
-		return -ENOMEM;
+	/* this code path isn't called and is incorrect anyways */
+	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
+		nvbe->unmap_pages = false;
+		return 0;
 	}
 
-	nvbe->nr_pages = 0;
-	while (num_pages--) {
-		/* this code path isn't called and is incorrect anyways */
-		if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
-			nvbe->pages[nvbe->nr_pages] =
-					dma_addrs[nvbe->nr_pages];
-			nvbe->ttm_alloced[nvbe->nr_pages] = true;
-		} else {
-			nvbe->pages[nvbe->nr_pages] =
-				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
-				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(dev->pdev,
-					  nvbe->pages[nvbe->nr_pages])) {
-				be->func->clear(be);
-				return -EFAULT;
-			}
-			nvbe->ttm_alloced[nvbe->nr_pages] = false;
+	for (i = 0; i < num_pages; i++) {
+		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
+					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
+			nvbe->nr_pages = --i;
+			be->func->clear(be);
+			return -EFAULT;
 		}
-
-		nvbe->nr_pages++;
 	}
 
 	return 0;
@@ -72,25 +57,16 @@ static void
 nouveau_sgdma_clear(struct ttm_backend *be)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_device *dev;
-
-	if (nvbe && nvbe->pages) {
-		dev = nvbe->dev;
-		NV_DEBUG(dev, "\n");
+	struct drm_device *dev = nvbe->dev;
 
-		if (nvbe->bound)
-			be->func->unbind(be);
+	if (nvbe->bound)
+		be->func->unbind(be);
 
+	if (nvbe->unmap_pages) {
 		while (nvbe->nr_pages--) {
-			if (!nvbe->ttm_alloced[nvbe->nr_pages])
-				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
-		kfree(nvbe->pages);
-		kfree(nvbe->ttm_alloced);
-		nvbe->pages = NULL;
-		nvbe->ttm_alloced = NULL;
-		nvbe->nr_pages = 0;
 	}
 }
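
A note for readers unfamiliar with the legacy PCI DMA API the driver uses
here: the sketch below isolates the map-or-unwind pattern the new
populate() relies on. pci_map_page(), pci_dma_mapping_error() and
pci_unmap_page() are real calls from 2011-era kernels; the helper name,
its signature and the inline unwind are hypothetical (the patch itself
records the mapped count in nvbe->nr_pages and delegates the unwind to
clear() via be->func->clear(be) instead).

	#include <linux/errno.h>
	#include <linux/pci.h>

	/* Hypothetical helper, not from the patch: map num_pages pages for
	 * bidirectional DMA, unwinding every successful mapping if one
	 * fails, so the caller never sees a half-mapped array. */
	static int map_all_or_none(struct pci_dev *pdev, struct page **pages,
				   dma_addr_t *dma, unsigned long num_pages)
	{
		unsigned long i;

		for (i = 0; i < num_pages; i++) {
			dma[i] = pci_map_page(pdev, pages[i], 0, PAGE_SIZE,
					      PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(pdev, dma[i])) {
				/* pages 0..i-1 were mapped; page i never was */
				while (i--)
					pci_unmap_page(pdev, dma[i], PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
				return -EFAULT;
			}
		}
		return 0;
	}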