path: root/drivers/gpu/drm/nouveau/nouveau_sgdma.c
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-10-17 17:14:26 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-12-06 05:39:51 -0500
commit	3230cfc34fca9d17c1628cf0e4ac25199592a69a (patch)
tree	40685914703f0a709b2180d7cdf01e770fa5a4dc /drivers/gpu/drm/nouveau/nouveau_sgdma.c
parent	c52494f69538f6fe1a234972f024011b17a48329 (diff)
drm/nouveau: enable the ttm dma pool when swiotlb is active V3
If the card is capable of more than 32-bit, then use the default TTM page
pool code which allocates from anywhere in the memory.

Note: If the 'ttm.no_dma' parameter is set, the override is ignored and the
default TTM pool is used.

V2 use pci_set_consistent_dma_mask
V3 Rebase on top of no memory account changes (where/when is my delorean
when i need it ?)

CC: Ben Skeggs <bskeggs@redhat.com>
CC: Francisco Jerez <currojerez@riseup.net>
CC: Dave Airlie <airlied@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
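The message above states the pool-selection policy in prose. As a minimal
illustrative sketch only (not code from this patch): the helper name and the
dma_bits parameter below are invented for the example, and using
swiotlb_nr_tbl() to detect an active swiotlb is an assumption;
pci_set_consistent_dma_mask() and DMA_BIT_MASK() are the existing kernel
interfaces the message refers to.

/*
 * Sketch of the policy described in the commit message: a card that can
 * address more than 32 bits keeps the default TTM page pool; the TTM DMA
 * pool is only interesting when swiotlb would otherwise bounce the traffic.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

static bool nouveau_would_use_dma_pool(struct pci_dev *pdev,
				       unsigned int dma_bits)
{
	/* Advertise the card's addressing capability to the DMA layer. */
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_bits)))
		dma_bits = 32;	/* mask rejected, assume 32-bit only */

	/* Cards that see past 32 bits stay on the default page pool. */
	if (dma_bits > 32)
		return false;

	/* Otherwise the DMA pool only matters when swiotlb is active. */
	return swiotlb_nr_tbl() != 0;
}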
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c	60
1 file changed, 1 insertion(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index bc2ab900b24c..ee1eb7cba798 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -13,41 +13,6 @@ struct nouveau_sgdma_be {
 	u64 offset;
 };
 
-static int
-nouveau_sgdma_dma_map(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	int i;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
-						   0, PAGE_SIZE,
-						   PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void
-nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	int i;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm->dma_address[i]) {
-			pci_unmap_page(dev->pdev, ttm->dma_address[i],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		}
-		ttm->dma_address[i] = 0;
-	}
-}
-
 static void
 nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
@@ -67,13 +32,8 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;
-	int r;
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
@@ -110,7 +70,6 @@ nv04_sgdma_unbind(struct ttm_tt *ttm)
 		nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -141,13 +100,8 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2;
 	u32 cnt = ttm->num_pages;
-	int r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	while (cnt--) {
 		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -173,7 +127,6 @@ nv41_sgdma_unbind(struct ttm_tt *ttm)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -256,13 +209,9 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2, tmp[4];
 	u32 cnt = ttm->num_pages;
-	int i, r;
+	int i;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	if (pte & 0x0000000c) {
 		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -321,7 +270,6 @@ nv44_sgdma_unbind(struct ttm_tt *ttm)
 	nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
 	nv44_sgdma_flush(ttm);
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
@@ -335,13 +283,8 @@ static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
 	struct nouveau_mem *node = mem->mm_node;
-	int r;
 
 	/* noop: bound in move_notify() */
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 	node->pages = ttm->dma_address;
 	return 0;
 }
@@ -350,7 +293,6 @@ static int
 nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
 	/* noop: unbound in move_notify() */
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
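With the per-bind map/unmap loops removed, every ttm->dma_address entry must
already be valid before a bind routine runs; that is what the TTM DMA pool
provides when it populates the ttm_tt. The following is a rough conceptual
sketch of that idea only, not the actual TTM pool code: the sketch_tt
structure and sketch_tt_populate() helper are invented for illustration, and
dma_alloc_coherent() stands in for however the pool really obtains
DMA-addressable pages.

/*
 * Conceptual sketch: hand out pages that already carry a bus address,
 * so callers can consume dma_address[] directly instead of calling
 * pci_map_page() at bind time.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct sketch_tt {
	unsigned long num_pages;
	void **cpu_addr;		/* kernel mapping of each page */
	dma_addr_t *dma_address;	/* bus address consumed by bind() */
};

static int sketch_tt_populate(struct device *dev, struct sketch_tt *tt)
{
	unsigned long i;

	for (i = 0; i < tt->num_pages; i++) {
		/* A coherent allocation returns both mappings at once. */
		tt->cpu_addr[i] = dma_alloc_coherent(dev, PAGE_SIZE,
						     &tt->dma_address[i],
						     GFP_KERNEL);
		if (!tt->cpu_addr[i])
			return -ENOMEM;
	}
	return 0;
}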