author	Jerome Glisse <jglisse@redhat.com>	2011-11-01 20:46:13 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-12-06 05:39:17 -0500
commit	649bf3ca77343e3be1e0af8e21356fa569b1abd9 (patch)
tree	01ad6a5f3f74b087cb791f5965d3190916975789	/drivers/gpu/drm/nouveau/nouveau_sgdma.c
parent	822c4d9ae0d55a4fcea9f0a462bc6406a06692e2 (diff)
drm/ttm: merge ttm_backend and ttm_tt V5
ttm_backend will only exist with a ttm_tt, and ttm_tt will only be of
interest when bound to a backend. Merge them to avoid code and data
duplication.

V2 Rebase on top of memory accounting overhaul
V3 Rebase on top of more memory accounting changes
V4 Rebase on top of no memory account changes (where/when is my
   delorean when i need it ?)
V5 make sure ttm is unbound before destroying, change commit message
   on suggestion from Tormod Volden

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
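The pattern this patch relies on throughout nouveau_sgdma.c is embed-and-cast: the driver-private struct nouveau_sgdma_be now embeds struct ttm_tt as its first member, so each ttm_backend_func hook receives a struct ttm_tt * and casts it back to the wrapper to reach driver-private state such as nvbe->offset. Below is a minimal standalone C sketch of that idiom only; fake_ttm_tt, fake_sgdma_be and fake_bind are hypothetical stand-in names, not TTM API.

#include <stdio.h>

/* Stand-in for the generic object handed around by the core
 * (plays the role of struct ttm_tt). */
struct fake_ttm_tt {
	unsigned long num_pages;
};

/* Driver-private wrapper. The generic object is the FIRST member,
 * so a pointer to it is also a valid pointer to the wrapper. */
struct fake_sgdma_be {
	struct fake_ttm_tt ttm;   /* like nvbe->ttm in the patch */
	unsigned long offset;     /* driver-private state, like nvbe->offset */
};

/* A "hook" that only receives the generic pointer, as the merged
 * bind/unbind/destroy callbacks do after this patch. */
static void fake_bind(struct fake_ttm_tt *ttm, unsigned long offset)
{
	struct fake_sgdma_be *nvbe = (struct fake_sgdma_be *)ttm;

	nvbe->offset = offset;
	printf("bound %lu pages at offset 0x%lx\n",
	       ttm->num_pages, nvbe->offset);
}

int main(void)
{
	struct fake_sgdma_be nvbe = { .ttm = { .num_pages = 16 } };

	/* The core only ever sees &nvbe.ttm, yet the driver recovers nvbe. */
	fake_bind(&nvbe.ttm, 0x1000);
	return 0;
}

Because the embedded struct is the first member, the cast in the hooks is well defined: a pointer to the embedded ttm_tt and a pointer to the containing nouveau_sgdma_be share the same address.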
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c	188
1 file changed, 85 insertions(+), 103 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe44..bc2ab900b24c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,44 +8,23 @@
 #define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
 
 struct nouveau_sgdma_be {
-	struct ttm_backend backend;
+	struct ttm_tt ttm;
 	struct drm_device *dev;
-
-	dma_addr_t *pages;
-	unsigned nr_pages;
-	bool unmap_pages;
-
 	u64 offset;
-	bool bound;
 };
 
 static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-		       struct page **pages, struct page *dummy_read_page,
-		       dma_addr_t *dma_addrs)
+nouveau_sgdma_dma_map(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	int i;
 
-	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
-	nvbe->pages = dma_addrs;
-	nvbe->nr_pages = num_pages;
-	nvbe->unmap_pages = true;
-
-	/* this code path isn't called and is incorrect anyways */
-	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
-		nvbe->unmap_pages = false;
-		return 0;
-	}
-
-	for (i = 0; i < num_pages; i++) {
-		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
-					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
-			nvbe->nr_pages = --i;
-			be->func->clear(be);
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+						   0, PAGE_SIZE,
+						   PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
 			return -EFAULT;
 		}
 	}
@@ -54,53 +33,52 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 }
 
 static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
+	int i;
 
-	if (nvbe->bound)
-		be->func->unbind(be);
-
-	if (nvbe->unmap_pages) {
-		while (nvbe->nr_pages--) {
-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (ttm->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm->dma_address[i],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
+		ttm->dma_address[i] = 0;
 	}
 }
 
 static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 
-	if (be) {
+	if (ttm) {
 		NV_DEBUG(nvbe->dev, "\n");
-
-		if (nvbe) {
-			if (nvbe->pages)
-				be->func->clear(be);
-			kfree(nvbe);
-		}
+		kfree(nvbe);
 	}
 }
 
 static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;
+	int r;
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < nvbe->nr_pages; i++) {
-		dma_addr_t dma_offset = nvbe->pages[i];
+	for (i = 0; i < ttm->num_pages; i++) {
+		dma_addr_t dma_offset = ttm->dma_address[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
 
 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +87,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 		}
 	}
 
-	nvbe->bound = true;
 	return 0;
 }
 
 static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_device *dev = nvbe->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +101,20 @@ nv04_sgdma_unbind(struct ttm_backend *be)
 
 	NV_DEBUG(dev, "\n");
 
-	if (!nvbe->bound)
+	if (ttm->state != tt_bound)
 		return 0;
 
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < nvbe->nr_pages; i++) {
+	for (i = 0; i < ttm->num_pages; i++) {
 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
 			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
-	nvbe->bound = false;
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv04_sgdma_backend = {
-	.populate = nouveau_sgdma_populate,
-	.clear = nouveau_sgdma_clear,
 	.bind = nv04_sgdma_bind,
 	.unbind = nv04_sgdma_unbind,
 	.destroy = nouveau_sgdma_destroy
@@ -158,16 +133,21 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
 }
 
 static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->pages;
+	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
+	int r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	while (cnt--) {
 		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -175,18 +155,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nvbe->bound = true;
 	return 0;
 }
 
 static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
 	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
 
 	while (cnt--) {
 		nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +173,23 @@ nv41_sgdma_unbind(struct ttm_backend *be)
194 } 173 }
195 174
196 nv41_sgdma_flush(nvbe); 175 nv41_sgdma_flush(nvbe);
197 nvbe->bound = false; 176 nouveau_sgdma_dma_unmap(ttm);
198 return 0; 177 return 0;
199} 178}
200 179
201static struct ttm_backend_func nv41_sgdma_backend = { 180static struct ttm_backend_func nv41_sgdma_backend = {
202 .populate = nouveau_sgdma_populate,
203 .clear = nouveau_sgdma_clear,
204 .bind = nv41_sgdma_bind, 181 .bind = nv41_sgdma_bind,
205 .unbind = nv41_sgdma_unbind, 182 .unbind = nv41_sgdma_unbind,
206 .destroy = nouveau_sgdma_destroy 183 .destroy = nouveau_sgdma_destroy
207}; 184};
208 185
209static void 186static void
210nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe) 187nv44_sgdma_flush(struct ttm_tt *ttm)
211{ 188{
189 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
212 struct drm_device *dev = nvbe->dev; 190 struct drm_device *dev = nvbe->dev;
213 191
214 nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12); 192 nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
215 nv_wr32(dev, 0x100808, nvbe->offset | 0x20); 193 nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
216 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001)) 194 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
217 NV_ERROR(dev, "gart flush timeout: 0x%08x\n", 195 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,17 +248,21 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
 }
 
 static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->pages;
+	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2, tmp[4];
-	u32 cnt = nvbe->nr_pages;
-	int i;
+	u32 cnt = ttm->num_pages;
+	int i, r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
 
 	if (pte & 0x0000000c) {
 		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -305,19 +287,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	if (cnt)
 		nv44_sgdma_fill(pgt, list, pte, cnt);
 
-	nv44_sgdma_flush(nvbe);
-	nvbe->bound = true;
+	nv44_sgdma_flush(ttm);
 	return 0;
 }
 
 static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
 	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = nvbe->nr_pages;
+	u32 cnt = ttm->num_pages;
 
 	if (pte & 0x0000000c) {
 		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +320,53 @@ nv44_sgdma_unbind(struct ttm_backend *be)
 	if (cnt)
 		nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
-	nv44_sgdma_flush(nvbe);
-	nvbe->bound = false;
+	nv44_sgdma_flush(ttm);
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv44_sgdma_backend = {
-	.populate = nouveau_sgdma_populate,
-	.clear = nouveau_sgdma_clear,
 	.bind = nv44_sgdma_bind,
 	.unbind = nv44_sgdma_unbind,
 	.destroy = nouveau_sgdma_destroy
 };
 
 static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct nouveau_mem *node = mem->mm_node;
+	int r;
+
 	/* noop: bound in move_notify() */
-	node->pages = nvbe->pages;
-	nvbe->pages = (dma_addr_t *)node;
-	nvbe->bound = true;
+	r = nouveau_sgdma_dma_map(ttm);
+	if (r) {
+		return r;
+	}
+	node->pages = ttm->dma_address;
 	return 0;
 }
 
 static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
 	/* noop: unbound in move_notify() */
-	nvbe->pages = node->pages;
-	node->pages = NULL;
-	nvbe->bound = false;
+	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
-	.populate = nouveau_sgdma_populate,
-	.clear = nouveau_sgdma_clear,
 	.bind = nv50_sgdma_bind,
 	.unbind = nv50_sgdma_unbind,
 	.destroy = nouveau_sgdma_destroy
 };
 
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+			 unsigned long size, uint32_t page_flags,
+			 struct page *dummy_read_page)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_sgdma_be *nvbe;
 
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +374,12 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
 		return NULL;
 
 	nvbe->dev = dev;
+	nvbe->ttm.func = dev_priv->gart_info.func;
 
-	nvbe->backend.func = dev_priv->gart_info.func;
-	return &nvbe->backend;
+	if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+		return NULL;
+	}
+	return &nvbe->ttm;
 }
 
 int