author     Ben Skeggs <bskeggs@redhat.com>  2011-01-10 23:52:40 -0500
committer  Ben Skeggs <bskeggs@redhat.com>  2011-02-24 15:44:04 -0500
commit     7948758d27be1b69b6a79ed4f3f22e36a3b95965 (patch)
tree       b308749b1bab10164d5ae268ee5ed7fc923bf381
parent     58e6c7a9183071b89b0ac94862f369ed55775a7a (diff)

drm/nv40: implement support for on-chip PCIEGART

v2. Moved the nv44 PCIEGART table back to instmem, where it's not
accessible by userspace clients.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h   |   7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 290
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c       |  59
3 files changed, 330 insertions(+), 26 deletions(-)
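A note on the PTE format before the patch body: the NV41-class backend added below writes one 32-bit PTE per 4KiB page, storing the page's bus address shifted right by 7 bits with bit 0 serving as the present bit (see nv41_sgdma_bind() in the patch). As an illustrative aside, not part of the patch, a minimal userspace sketch of that encoding with a made-up bus address:

    #include <stdint.h>
    #include <stdio.h>

    /* NV41-style GART PTE, mirroring the nv_wo32(pgt, pte, (*list++ >> 7) | 1)
     * write in nv41_sgdma_bind(): bus address >> 7, bit 0 = present. */
    static uint32_t nv41_pte(uint64_t bus_addr)
    {
            return (uint32_t)(bus_addr >> 7) | 1;
    }

    int main(void)
    {
            /* hypothetical bus address of a 4KiB page */
            printf("PTE: 0x%08x\n", nv41_pte(0x1234000));  /* 0x00024681 */
            return 0;
    }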
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b36dc351f8eb..1c6279f588ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -699,6 +699,13 @@ struct drm_nouveau_private {
                 uint64_t aper_size;
                 uint64_t aper_free;
 
+                struct ttm_backend_func *func;
+
+                struct {
+                        struct page *page;
+                        dma_addr_t addr;
+                } dummy;
+
                 struct nouveau_gpuobj *sg_ctxdma;
                 struct nouveau_vma vma;
         } gart_info;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index fd2093c31e68..a2b89bf0ada1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -164,6 +164,213 @@ static struct ttm_backend_func nv04_sgdma_backend = {
         .destroy = nouveau_sgdma_destroy
 };
 
+static void
+nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+        struct drm_device *dev = nvbe->dev;
+
+        nv_wr32(dev, 0x100810, 0x00000022);
+        if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
+                NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
+                         nv_rd32(dev, 0x100810));
+        nv_wr32(dev, 0x100810, 0x00000000);
+}
+
+static int
+nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+        dma_addr_t *list = nvbe->pages;
+        u32 pte = mem->start << 2;
+        u32 cnt = nvbe->nr_pages;
+
+        nvbe->offset = mem->start << PAGE_SHIFT;
+
+        while (cnt--) {
+                nv_wo32(pgt, pte, (*list++ >> 7) | 1);
+                pte += 4;
+        }
+
+        nv41_sgdma_flush(nvbe);
+        nvbe->bound = true;
+        return 0;
+}
+
+static int
+nv41_sgdma_unbind(struct ttm_backend *be)
+{
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+        u32 pte = (nvbe->offset >> 12) << 2;
+        u32 cnt = nvbe->nr_pages;
+
+        while (cnt--) {
+                nv_wo32(pgt, pte, 0x00000000);
+                pte += 4;
+        }
+
+        nv41_sgdma_flush(nvbe);
+        nvbe->bound = false;
+        return 0;
+}
+
+static struct ttm_backend_func nv41_sgdma_backend = {
+        .populate = nouveau_sgdma_populate,
+        .clear = nouveau_sgdma_clear,
+        .bind = nv41_sgdma_bind,
+        .unbind = nv41_sgdma_unbind,
+        .destroy = nouveau_sgdma_destroy
+};
+
+static void
+nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+        struct drm_device *dev = nvbe->dev;
+
+        nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
+        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
+                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
+                         nv_rd32(dev, 0x100808));
+        nv_wr32(dev, 0x100808, 0x00000000);
+}
+
+static void
+nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
+{
+        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+        dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
+        u32 pte, tmp[4];
+
+        pte = base >> 2;
+        base &= ~0x0000000f;
+
+        tmp[0] = nv_ro32(pgt, base + 0x0);
+        tmp[1] = nv_ro32(pgt, base + 0x4);
+        tmp[2] = nv_ro32(pgt, base + 0x8);
+        tmp[3] = nv_ro32(pgt, base + 0xc);
+        while (cnt--) {
+                u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
+                switch (pte++ & 0x3) {
+                case 0:
+                        tmp[0] &= ~0x07ffffff;
+                        tmp[0] |= addr;
+                        break;
+                case 1:
+                        tmp[0] &= ~0xf8000000;
+                        tmp[0] |= addr << 27;
+                        tmp[1] &= ~0x003fffff;
+                        tmp[1] |= addr >> 5;
+                        break;
+                case 2:
+                        tmp[1] &= ~0xffc00000;
+                        tmp[1] |= addr << 22;
+                        tmp[2] &= ~0x0001ffff;
+                        tmp[2] |= addr >> 10;
+                        break;
+                case 3:
+                        tmp[2] &= ~0xfffe0000;
+                        tmp[2] |= addr << 17;
+                        tmp[3] &= ~0x00000fff;
+                        tmp[3] |= addr >> 15;
+                        break;
+                }
+        }
+
+        tmp[3] |= 0x40000000;
+
+        nv_wo32(pgt, base + 0x0, tmp[0]);
+        nv_wo32(pgt, base + 0x4, tmp[1]);
+        nv_wo32(pgt, base + 0x8, tmp[2]);
+        nv_wo32(pgt, base + 0xc, tmp[3]);
+}
+
+static int
+nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+        dma_addr_t *list = nvbe->pages;
+        u32 pte = mem->start << 2, tmp[4];
+        u32 cnt = nvbe->nr_pages;
+        int i;
+
+        nvbe->offset = mem->start << PAGE_SHIFT;
+
+        if (pte & 0x0000000c) {
+                u32 max = 4 - ((pte >> 2) & 0x3);
+                u32 part = (cnt > max) ? max : cnt;
+                nv44_sgdma_fill(pgt, list, pte, part);
+                pte += (part << 2);
+                list += part;
+                cnt -= part;
+        }
+
+        while (cnt >= 4) {
+                for (i = 0; i < 4; i++)
+                        tmp[i] = *list++ >> 12;
+                nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
+                nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
+                nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
+                nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
+                pte += 0x10;
+                cnt -= 4;
+        }
+
+        if (cnt)
+                nv44_sgdma_fill(pgt, list, pte, cnt);
+
+        nv44_sgdma_flush(nvbe);
+        nvbe->bound = true;
+        return 0;
+}
+
+static int
+nv44_sgdma_unbind(struct ttm_backend *be)
+{
+        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+        u32 pte = (nvbe->offset >> 12) << 2;
+        u32 cnt = nvbe->nr_pages;
+
+        if (pte & 0x0000000c) {
+                u32 max = 4 - ((pte >> 2) & 0x3);
+                u32 part = (cnt > max) ? max : cnt;
+                nv44_sgdma_fill(pgt, NULL, pte, part);
+                pte += (part << 2);
+                cnt -= part;
+        }
+
+        while (cnt >= 4) {
+                nv_wo32(pgt, pte + 0x0, 0x00000000);
+                nv_wo32(pgt, pte + 0x4, 0x00000000);
+                nv_wo32(pgt, pte + 0x8, 0x00000000);
+                nv_wo32(pgt, pte + 0xc, 0x00000000);
+                pte += 0x10;
+                cnt -= 4;
+        }
+
+        if (cnt)
+                nv44_sgdma_fill(pgt, NULL, pte, cnt);
+
+        nv44_sgdma_flush(nvbe);
+        nvbe->bound = false;
+        return 0;
+}
+
+static struct ttm_backend_func nv44_sgdma_backend = {
+        .populate = nouveau_sgdma_populate,
+        .clear = nouveau_sgdma_clear,
+        .bind = nv44_sgdma_bind,
+        .unbind = nv44_sgdma_unbind,
+        .destroy = nouveau_sgdma_destroy
+};
+
 static int
 nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -213,10 +420,7 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
 
         nvbe->dev = dev;
 
-        if (dev_priv->card_type >= NV_50)
-                nvbe->backend.func = &nv50_sgdma_backend;
-        else
-                nvbe->backend.func = &nv04_sgdma_backend;
+        nvbe->backend.func = dev_priv->gart_info.func;
         return &nvbe->backend;
 }
 
@@ -225,31 +429,71 @@ nouveau_sgdma_init(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_gpuobj *gpuobj = NULL;
-        uint32_t aper_size, obj_size;
-        int i, ret;
+        u32 aper_size, align;
+        int ret;
+
+        if (dev_priv->card_type >= NV_50 ||
+            dev_priv->ramin_rsvd_vram >= 2 * 1024 * 1024)
+                aper_size = 512 * 1024 * 1024;
+        else
+                aper_size = 64 * 1024 * 1024;
+
+        /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
+         * christmas.  The cards before it have them, the cards after
+         * it have them, why is NV44 so unloved?
+         */
+        dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
+        if (!dev_priv->gart_info.dummy.page)
+                return -ENOMEM;
+
+        dev_priv->gart_info.dummy.addr =
+                pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
+                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+        if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
+                NV_ERROR(dev, "error mapping dummy page\n");
+                __free_page(dev_priv->gart_info.dummy.page);
+                dev_priv->gart_info.dummy.page = NULL;
+                return -ENOMEM;
+        }
 
         if (dev_priv->card_type >= NV_50) {
-                ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+                ret = nouveau_vm_get(dev_priv->chan_vm, aper_size,
                                      12, NV_MEM_ACCESS_RW,
                                      &dev_priv->gart_info.vma);
                 if (ret)
                         return ret;
 
                 dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
-                dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
+                dev_priv->gart_info.aper_size = aper_size;
                 dev_priv->gart_info.type = NOUVEAU_GART_HW;
-        } else {
-                if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
-                        aper_size = 64 * 1024 * 1024;
-                else
-                        aper_size = 512 * 1024 * 1024;
+                dev_priv->gart_info.func = &nv50_sgdma_backend;
+        } else
+        if (drm_pci_device_is_pcie(dev) &&
+            dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+                if (nv44_graph_class(dev)) {
+                        dev_priv->gart_info.func = &nv44_sgdma_backend;
+                        align = 512 * 1024;
+                } else {
+                        dev_priv->gart_info.func = &nv41_sgdma_backend;
+                        align = 16;
+                }
 
-                obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
-                obj_size += 8; /* ctxdma header */
+                ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
+                                         NVOBJ_FLAG_ZERO_ALLOC |
+                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+                if (ret) {
+                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+                        return ret;
+                }
 
-                ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-                                         NVOBJ_FLAG_ZERO_ALLOC |
-                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+                dev_priv->gart_info.sg_ctxdma = gpuobj;
+                dev_priv->gart_info.aper_base = 0;
+                dev_priv->gart_info.aper_size = aper_size;
+                dev_priv->gart_info.type = NOUVEAU_GART_HW;
+        } else {
+                ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
+                                         NVOBJ_FLAG_ZERO_ALLOC |
+                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                 if (ret) {
                         NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                         return ret;
@@ -261,13 +505,12 @@ nouveau_sgdma_init(struct drm_device *dev)
                              (0 << 14) /* RW */ |
                              (2 << 16) /* PCI */);
                 nv_wo32(gpuobj, 4, aper_size - 1);
-                for (i = 2; i < 2 + (aper_size >> 12); i++)
-                        nv_wo32(gpuobj, i * 4, 0x00000000);
 
                 dev_priv->gart_info.sg_ctxdma = gpuobj;
                 dev_priv->gart_info.aper_base = 0;
                 dev_priv->gart_info.aper_size = aper_size;
                 dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
+                dev_priv->gart_info.func = &nv04_sgdma_backend;
         }
 
         return 0;
@@ -280,6 +523,13 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 
         nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
         nouveau_vm_put(&dev_priv->gart_info.vma);
+
+        if (dev_priv->gart_info.dummy.page) {
+                pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
+                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+                __free_page(dev_priv->gart_info.dummy.page);
+                dev_priv->gart_info.dummy.page = NULL;
+        }
 }
 
 uint32_t
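The NV44 path above is denser than NV41's one-word-per-page table: nv44_sgdma_fill() and the aligned fast path in nv44_sgdma_bind() pack four 27-bit page frame numbers (bus address >> 12) into a 128-bit group, with bit 30 of the final word (the 0x40000000 above) flagging the group valid. On unbind, partial groups are refilled with the dummy page's address rather than cleared, since without per-page present bits every slot in a valid group must point somewhere harmless; whole groups are simply zeroed. As an illustrative aside, not part of the patch, a minimal userspace sketch of the packing with hypothetical addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack four 27-bit page frame numbers into one 128-bit NV44 GART group,
     * mirroring the aligned fast path of nv44_sgdma_bind().  Bit 30 of the
     * last word marks the group valid. */
    static void nv44_pack_group(uint32_t out[4], const uint32_t pfn[4])
    {
            out[0] = pfn[0] >> 0  | pfn[1] << 27;
            out[1] = pfn[1] >> 5  | pfn[2] << 22;
            out[2] = pfn[2] >> 10 | pfn[3] << 17;
            out[3] = pfn[3] >> 15 | 0x40000000;
    }

    int main(void)
    {
            /* hypothetical bus addresses of four scattered 4KiB pages */
            const uint64_t page[4] = { 0x1234000, 0xabcd000, 0x0042000, 0x7fff000 };
            uint32_t pfn[4], out[4];
            int i;

            for (i = 0; i < 4; i++)
                    pfn[i] = (uint32_t)(page[i] >> 12);
            nv44_pack_group(out, pfn);
            printf("%08x %08x %08x %08x\n", out[0], out[1], out[2], out[3]);
            return 0;
    }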
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index f3d9c0505f7b..f0ac2a768c67 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -24,6 +24,53 @@ nv40_fb_set_tile_region(struct drm_device *dev, int i)
         }
 }
 
+static void
+nv40_fb_init_gart(struct drm_device *dev)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+        if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+                nv_wr32(dev, 0x100800, 0x00000001);
+                return;
+        }
+
+        nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
+        nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
+        nv_wr32(dev, 0x100820, 0x00000000);
+}
+
+static void
+nv44_fb_init_gart(struct drm_device *dev)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+        u32 vinst;
+
+        if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+                nv_wr32(dev, 0x100850, 0x80000000);
+                nv_wr32(dev, 0x100800, 0x00000001);
+                return;
+        }
+
+        /* calculate vram address of this PRAMIN block, object
+         * must be allocated on 512KiB alignment, and not exceed
+         * a total size of 512KiB for this to work correctly
+         */
+        vinst = nv_rd32(dev, 0x10020c);
+        vinst -= ((gart->pinst >> 19) + 1) << 19;
+
+        nv_wr32(dev, 0x100850, 0x80000000);
+        nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
+
+        nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
+        nv_wr32(dev, 0x100850, 0x00008000);
+        nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
+        nv_wr32(dev, 0x100820, 0x00000000);
+        nv_wr32(dev, 0x10082c, 0x00000001);
+        nv_wr32(dev, 0x100800, vinst | 0x00000010);
+}
+
 int
 nv40_fb_init(struct drm_device *dev)
 {
@@ -32,12 +79,12 @@ nv40_fb_init(struct drm_device *dev)
         uint32_t tmp;
         int i;
 
-        /* This is strictly a NV4x register (don't know about NV5x). */
-        /* The blob sets these to all kinds of values, and they mess up our setup. */
-        /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
-        /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
-        /* Any idea what this is? */
-        nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+        if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+                if (nv44_graph_class(dev))
+                        nv44_fb_init_gart(dev);
+                else
+                        nv40_fb_init_gart(dev);
+        }
 
         switch (dev_priv->chipset) {
         case 0x40:
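One piece of arithmetic in nv44_fb_init_gart() is worth unpacking: PRAMIN lives at the top of VRAM, so the table's VRAM address is found by stepping back whole 512KiB blocks from the end of VRAM (hence the 512KiB alignment requirement in the comment above). As an illustrative aside, not part of the patch, the same calculation with assumed values, on the assumption that register 0x10020c reads back the total VRAM size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t vram_size = 256u << 20;   /* assumed: 0x10020c read, 256 MiB card */
            uint32_t pinst = 0x00080000;       /* assumed PRAMIN offset, 512KiB aligned */

            /* same formula as nv44_fb_init_gart(): subtract the table's
             * 512KiB-aligned distance from the end of VRAM */
            uint32_t vinst = vram_size - (((pinst >> 19) + 1) << 19);

            printf("GART table at VRAM 0x%08x\n", vinst);   /* 0x0ff00000 */
            return 0;
    }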