-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c |  13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c     |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h     |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h     |  19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c     | 479
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c   |   7
-rw-r--r--  include/drm/nouveau_drm.h                 |  86
7 files changed, 208 insertions(+), 404 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index ceb83961b16f..6dfb425cbae9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -385,6 +385,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 		return ret;
 	init->channel  = chan->id;
 
+	if (chan->dma.ib_max)
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+					NOUVEAU_GEM_DOMAIN_GART;
+	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+	else
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+
 	init->subchan[0].handle = NvM2MF;
 	if (dev_priv->card_type < NV_50)
 		init->subchan[0].grclass = 0x0039;
@@ -424,7 +432,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -434,13 +441,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 679b03c28df1..c8482a108a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -179,7 +179,7 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
 
 void
 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
-	      int delta, int dwords)
+	      int delta, int length)
 {
 	struct nouveau_bo *pb = chan->pushbuf_bo;
 	uint64_t offset = bo->bo.offset + delta;
@@ -187,7 +187,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 
 	BUG_ON(chan->dma.ib_free < 1);
 	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
-	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | dwords << 10);
+	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
 
 	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
 	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
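These two hunks change nv50_dma_push() to take the push segment's length in bytes rather than dwords, and to pack it into the indirect-buffer entry at bit 8 rather than bit 10. Since a dword count shifted left by 10 equals the same length in bytes shifted left by 8, dword-aligned pushes encode identically; FIRE_RING (in the nouveau_dma.h hunk below) converts its dword count with << 2 to match. A minimal sketch of the resulting entry layout, where the helper name is illustrative and not part of the patch:

/* Illustrative sketch of the NV50 indirect-buffer entry as written by
 * nv50_dma_push() after this patch: word 0 holds the low 32 bits of the
 * push segment's GPU offset, word 1 the high offset bits OR'd with the
 * segment length in BYTES starting at bit 8 (previously a DWORD count
 * at bit 10; dwords << 10 == (dwords * 4) << 8 for aligned pushes). */
#include <stdint.h>

static inline void
nv50_ib_entry(uint32_t entry[2], uint64_t offset, uint32_t length_bytes)
{
	entry[0] = (uint32_t)offset;			/* lower_32_bits() */
	entry[1] = (uint32_t)(offset >> 32) | (length_bytes << 8);
}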
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index da6e16dafa4d..8b05c15866d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -32,7 +32,7 @@
 #endif
 
 void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
-		   int delta, int dwords);
+		   int delta, int length);
 
 /*
  * There's a hw race condition where you can't jump to your PUT offset,
@@ -149,7 +149,7 @@ FIRE_RING(struct nouveau_channel *chan)
 
 	if (chan->dma.ib_max) {
 		nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
-			      chan->dma.cur - chan->dma.put);
+			      (chan->dma.cur - chan->dma.put) << 2);
 	} else {
 		WRITE_PUT(chan->dma.cur);
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d221044e0793..a33423622860 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -34,7 +34,7 @@
 
 #define DRIVER_MAJOR		0
 #define DRIVER_MINOR		0
-#define DRIVER_PATCHLEVEL	15
+#define DRIVER_PATCHLEVEL	16
 
 #define NOUVEAU_FAMILY   0x0000FFFF
 #define NOUVEAU_FLAGS    0xFFFF0000
@@ -83,6 +83,7 @@ struct nouveau_bo {
 	struct drm_file *reserved_by;
 	struct list_head entry;
 	int pbbo_index;
+	bool validate_mapped;
 
 	struct nouveau_channel *channel;
 
@@ -704,12 +705,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
 			       uint32_t reg, uint32_t mask, uint32_t val);
 extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
-extern int  nouveau_ioctl_card_init(struct drm_device *, void *data,
-				    struct drm_file *);
-extern int  nouveau_ioctl_suspend(struct drm_device *, void *data,
-				  struct drm_file *);
-extern int  nouveau_ioctl_resume(struct drm_device *, void *data,
-				 struct drm_file *);
 
 /* nouveau_mem.c */
 extern int  nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@ -1160,16 +1155,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
 				 struct drm_file *);
 extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
 				     struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
-					  struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
-					   struct drm_file *);
-extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
-				 struct drm_file *);
-extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
-				   struct drm_file *);
-extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
-				  struct drm_file *);
 extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
 				      struct drm_file *);
 extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index df72cd847025..fee959c72f40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -243,6 +243,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 		nouveau_fence_unref((void *)&prev_fence);
 	}
 
+	if (unlikely(nvbo->validate_mapped)) {
+		ttm_bo_kunmap(&nvbo->kmap);
+		nvbo->validate_mapped = false;
+	}
+
 	list_del(&nvbo->entry);
 	nvbo->reserved_by = NULL;
 	ttm_bo_unreserve(&nvbo->bo);
@@ -302,11 +307,14 @@ retry:
 			if (ret == -EAGAIN)
 				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
 			drm_gem_object_unreference(gem);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail reserve\n");
 				return ret;
+			}
 			goto retry;
 		}
 
+		b->user_priv = (uint64_t)(unsigned long)nvbo;
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +344,10 @@ retry:
 			}
 
 			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail wait_cpu\n");
 				return ret;
+			}
 			goto retry;
 		}
 	}
@@ -351,6 +361,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 {
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 				(void __force __user *)(uintptr_t)user_pbbo_ptr;
+	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
 
@@ -362,39 +373,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			spin_lock(&nvbo->bo.lock);
 			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				NV_ERROR(dev, "fail wait other chan\n");
 				return ret;
+			}
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail set_domain\n");
 			return ret;
+		}
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 				      false, false);
 		nvbo->channel = NULL;
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
+		}
 
-		if (nvbo->bo.offset == b->presumed_offset &&
+		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 			continue;
 
 		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 		else
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
-		b->presumed_offset = nvbo->bo.offset;
-		b->presumed_ok = 0;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+		b->presumed.offset = nvbo->bo.offset;
+		b->presumed.valid = 0;
 		relocs++;
 
-		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+				     &b->presumed, sizeof(b->presumed)))
 			return -EFAULT;
 	}
 
@@ -408,6 +426,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     uint64_t user_buffers, int nr_buffers,
 			     struct validate_op *op, int *apply_relocs)
 {
+	struct drm_device *dev = chan->dev;
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +437,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 		return 0;
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		NV_ERROR(dev, "validate_init\n");
 		return ret;
+	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -430,6 +452,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -437,6 +460,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -465,59 +489,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
-				struct drm_nouveau_gem_pushbuf_bo *bo,
-				unsigned nr_relocs, uint64_t ptr_relocs,
-				unsigned nr_dwords, unsigned first_dword,
-				uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
-	struct drm_device *dev = chan->dev;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 	if (IS_ERR(reloc))
 		return PTR_ERR(reloc);
 
-	for (i = 0; i < nr_relocs; i++) {
+	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 		struct drm_nouveau_gem_pushbuf_bo *b;
+		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
-		    r->reloc_index >= first_dword + nr_dwords) {
-			NV_ERROR(dev, "Bad relocation %d\n", i);
-			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
-			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index, nr_dwords);
+		if (unlikely(r->bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
 
 		b = &bo[r->bo_index];
-		if (b->presumed_ok)
+		if (b->presumed.valid)
 			continue;
 
+		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc container bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+		if (unlikely(r->reloc_bo_offset + 4 >
+			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+			NV_ERROR(dev, "reloc outside of bo\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!nvbo->kmap.virtual) {
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+					  &nvbo->kmap);
+			if (ret) {
+				NV_ERROR(dev, "failed kmap for reloc\n");
+				break;
+			}
+			nvbo->validate_mapped = true;
+		}
+
 		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
-			data = b->presumed_offset + r->data;
+			data = b->presumed.offset + r->data;
 		else
 		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
-			data = (b->presumed_offset + r->data) >> 32;
+			data = (b->presumed.offset + r->data) >> 32;
 		else
 			data = r->data;
 
 		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
-			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 				data |= r->tor;
 			else
 				data |= r->vor;
 		}
 
-		if (is_iomem)
-			iowrite32_native(data, (void __force __iomem *)
-						&pushbuf[r->reloc_index]);
-		else
-			pushbuf[r->reloc_index] = data;
+		spin_lock(&nvbo->bo.lock);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		if (ret) {
+			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+			break;
+		}
+		spin_unlock(&nvbo->bo.lock);
+
+		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
 	kfree(reloc);
@@ -528,125 +575,50 @@ int
 nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_gem_pushbuf *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_bo *bo;
 	struct nouveau_channel *chan;
 	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	uint32_t *pushbuf = NULL;
-	int ret = 0, do_reloc = 0, i;
+	struct nouveau_fence *fence = 0;
+	int i, j, ret = 0, do_reloc = 0;
 
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
 
-	if (req->nr_dwords >= chan->dma.max ||
-	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
-			 chan->dma.max - 1);
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
-		return -EINVAL;
-	}
-
-	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
-	if (IS_ERR(pushbuf))
-		return PTR_ERR(pushbuf);
-
-	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo)) {
-		kfree(pushbuf);
-		return PTR_ERR(bo);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
-					   req->nr_buffers, &op, &do_reloc);
-	if (ret)
-		goto out;
-
-	/* Apply any relocations that are required */
-	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
-						      bo, req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords, 0,
-						      pushbuf, false);
-		if (ret)
-			goto out;
-	}
-
-	/* Emit push buffer to the hw
-	 */
-	ret = RING_SPACE(chan, req->nr_dwords);
-	if (ret)
-		goto out;
-
-	OUT_RINGp(chan, pushbuf, req->nr_dwords);
+	req->vram_available = dev_priv->fb_aper_free;
+	req->gart_available = dev_priv->gart_info.aper_free;
+	if (unlikely(req->nr_push == 0))
+		goto out_next;
 
-	ret = nouveau_fence_new(chan, &fence, true);
-	if (ret) {
-		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
-		WIND_RING(chan);
-		goto out;
+	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		return -EINVAL;
 	}
 
-	if (nouveau_gem_pushbuf_sync(chan)) {
-		ret = nouveau_fence_wait(fence, NULL, false, false);
-		if (ret) {
-			for (i = 0; i < req->nr_dwords; i++)
-				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
-			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
-		}
+	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		return -EINVAL;
 	}
 
-out:
-	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
-	mutex_unlock(&dev->struct_mutex);
-	kfree(pushbuf);
-	kfree(bo);
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
-	struct nouveau_channel *chan;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *pbbo;
-	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	int i, ret = 0, do_reloc = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
-	if (unlikely(req->handle == 0))
-		goto out_next;
-
-	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
+	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 		return -EINVAL;
 	}
 
+	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+	if (IS_ERR(push))
+		return PTR_ERR(push);
+
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo))
+	if (IS_ERR(bo)) {
+		kfree(push);
 		return PTR_ERR(bo);
+	}
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -658,94 +630,9 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* Validate DMA push buffer */
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem) {
-		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
-		ret = -EINVAL;
-		goto out;
-	}
-	pbbo = nouveau_gem_object(gem);
-
-	if ((req->offset & 3) || req->nr_dwords < 2 ||
-	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
-	    (unsigned long)req->nr_dwords >
-	     ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
-		NV_ERROR(dev, "pb call misaligned or out of bounds: "
-			      "%d + %d * 4 > %ld\n",
-			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
-		ret = -EINVAL;
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
-			     chan->fence.sequence);
-	if (ret) {
-		NV_ERROR(dev, "resv pb: %d\n", ret);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
-	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
-	if (ret) {
-		NV_ERROR(dev, "validate pb: %d\n", ret);
-		ttm_bo_unreserve(&pbbo->bo);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	list_add_tail(&pbbo->entry, &op.both_list);
-
-	/* If presumed return address doesn't match, we need to map the
-	 * push buffer and fix it..
-	 */
-	if (dev_priv->card_type < NV_20) {
-		uint32_t retaddy;
-
-		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
-			ret = nouveau_dma_wait(chan, 0, 4 + NOUVEAU_DMA_SKIPS);
-			if (ret) {
-				NV_ERROR(dev, "jmp_space: %d\n", ret);
-				goto out;
-			}
-		}
-
-		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
-		retaddy |= 0x20000000;
-		if (retaddy != req->suffix0) {
-			req->suffix0 = retaddy;
-			do_reloc = 1;
-		}
-	}
-
 	/* Apply any relocations that are required */
 	if (do_reloc) {
-		void *pbvirt;
-		bool is_iomem;
-		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
-				  &pbbo->kmap);
-		if (ret) {
-			NV_ERROR(dev, "kmap pb: %d\n", ret);
-			goto out;
-		}
-
-		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
-						      req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords,
-						      req->offset / 4,
-						      pbvirt, is_iomem);
-
-		if (dev_priv->card_type < NV_20) {
-			nouveau_bo_wr32(pbbo,
-					req->offset / 4 + req->nr_dwords - 2,
-					req->suffix0);
-		}
-
-		ttm_bo_kunmap(&pbbo->kmap);
+		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
 		if (ret) {
 			NV_ERROR(dev, "reloc apply: %d\n", ret);
 			goto out;
@@ -753,36 +640,74 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	}
 
 	if (chan->dma.ib_max) {
-		ret = nouveau_dma_wait(chan, 2, 6);
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
 		if (ret) {
 			NV_INFO(dev, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
 
-		nv50_dma_push(chan, pbbo, req->offset, req->nr_dwords);
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			nv50_dma_push(chan, nvbo, push[i].offset,
+				      push[i].length);
+		}
 	} else
 	if (dev_priv->card_type >= NV_20) {
-		ret = RING_SPACE(chan, 2);
+		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
 			NV_ERROR(dev, "cal_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				req->offset) | 2);
-		OUT_RING(chan, 0);
+
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 2);
+			OUT_RING(chan, 0);
+		}
 	} else {
-		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
 			NV_ERROR(dev, "jmp_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				req->offset) | 0x20000000);
-		OUT_RING(chan, 0);
 
-		/* Space the jumps apart with NOPs. */
-		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+			uint32_t cmd;
+
+			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+			cmd |= 0x20000000;
+			if (unlikely(cmd != req->suffix0)) {
+				if (!nvbo->kmap.virtual) {
+					ret = ttm_bo_kmap(&nvbo->bo, 0,
+							  nvbo->bo.mem.
+							  num_pages,
+							  &nvbo->kmap);
+					if (ret) {
+						WIND_RING(chan);
+						goto out;
+					}
+					nvbo->validate_mapped = true;
+				}
+
+				nouveau_bo_wr32(nvbo, (push[i].offset +
+						push[i].length - 8) / 4, cmd);
+			}
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 0x20000000);
 			OUT_RING(chan, 0);
+			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+				OUT_RING(chan, 0);
+		}
 	}
 
 	ret = nouveau_fence_new(chan, &fence, true);
@@ -797,6 +722,7 @@ out:
 	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
+	kfree(push);
 
 out_next:
 	if (chan->dma.ib_max) {
@@ -815,19 +741,6 @@ out_next:
 	return ret;
 }
 
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-
-	req->vram_available = dev_priv->fb_aper_free;
-	req->gart_available = dev_priv->gart_info.aper_free;
-
-	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
-}
-
 static inline uint32_t
 domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 {
@@ -842,74 +755,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 }
 
 int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
-		return -EINVAL;
-	}
-
-	if (!DRM_SUSER(DRM_CURPROC))
-		return -EPERM;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-	nvbo = nouveau_gem_object(gem);
-
-	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
-	if (ret)
-		goto out;
-
-	req->offset = nvbo->bo.offset;
-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-		req->domain = NOUVEAU_GEM_DOMAIN_GART;
-	else
-		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	int ret;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-
-	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
 nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
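The reworked nouveau_gem_pushbuf_reloc_apply() above no longer patches a user-supplied dword array: each relocation now names a container buffer (reloc_bo_index) plus a byte offset into it (reloc_bo_offset), and the kernel writes the fixed-up dword straight into that BO through a cached kmap. A hedged sketch of just the value computation, mirroring the flag handling in the hunk; the standalone helper below is a simplified stand-in, not the driver's API:

#include <stdint.h>

#define NOUVEAU_GEM_RELOC_LOW  (1 << 0)
#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
#define NOUVEAU_GEM_RELOC_OR   (1 << 2)

/* Simplified stand-in: given the presumed GPU offset and domain of the
 * target BO, compute the dword the kernel writes at reloc_bo_offset
 * inside the container BO. 'in_gart' mirrors the
 * presumed.domain == NOUVEAU_GEM_DOMAIN_GART test in the patch. */
static uint32_t
reloc_value(uint64_t presumed_offset, int in_gart,
	    uint32_t flags, uint32_t data, uint32_t vor, uint32_t tor)
{
	uint32_t v;

	if (flags & NOUVEAU_GEM_RELOC_LOW)
		v = (uint32_t)(presumed_offset + data);
	else if (flags & NOUVEAU_GEM_RELOC_HIGH)
		v = (uint32_t)((presumed_offset + data) >> 32);
	else
		v = data;

	if (flags & NOUVEAU_GEM_RELOC_OR)
		v |= in_gart ? tor : vor;

	return v;
}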
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index ed5ac0b9a0ac..516a8d36cb10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -777,13 +777,6 @@ int nouveau_unload(struct drm_device *dev)
 	return 0;
 }
 
-int
-nouveau_ioctl_card_init(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	return nouveau_card_init(dev);
-}
-
 int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index f745948b61e4..a6a9f4af5ebd 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -25,13 +25,14 @@
 #ifndef __NOUVEAU_DRM_H__
 #define __NOUVEAU_DRM_H__
 
-#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
 
 struct drm_nouveau_channel_alloc {
 	uint32_t     fb_ctxdma_handle;
 	uint32_t     tt_ctxdma_handle;
 
 	int          channel;
+	uint32_t     pushbuf_domains;
 
 	/* Notifier memory */
 	uint32_t     notifier_handle;
@@ -109,68 +110,58 @@ struct drm_nouveau_gem_new {
 	uint32_t align;
 };
 
+#define NOUVEAU_GEM_MAX_BUFFERS 1024
+struct drm_nouveau_gem_pushbuf_bo_presumed {
+	uint32_t valid;
+	uint32_t domain;
+	uint64_t offset;
+};
+
 struct drm_nouveau_gem_pushbuf_bo {
 	uint64_t user_priv;
 	uint32_t handle;
 	uint32_t read_domains;
 	uint32_t write_domains;
 	uint32_t valid_domains;
-	uint32_t presumed_ok;
-	uint32_t presumed_domain;
-	uint64_t presumed_offset;
+	struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
 };
 
 #define NOUVEAU_GEM_RELOC_LOW  (1 << 0)
 #define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
 #define NOUVEAU_GEM_RELOC_OR   (1 << 2)
+#define NOUVEAU_GEM_MAX_RELOCS 1024
 struct drm_nouveau_gem_pushbuf_reloc {
+	uint32_t reloc_bo_index;
+	uint32_t reloc_bo_offset;
 	uint32_t bo_index;
-	uint32_t reloc_index;
 	uint32_t flags;
 	uint32_t data;
 	uint32_t vor;
 	uint32_t tor;
 };
 
-#define NOUVEAU_GEM_MAX_BUFFERS 1024
-#define NOUVEAU_GEM_MAX_RELOCS 1024
+#define NOUVEAU_GEM_MAX_PUSH 512
+struct drm_nouveau_gem_pushbuf_push {
+	uint32_t bo_index;
+	uint32_t pad;
+	uint64_t offset;
+	uint64_t length;
+};
 
 struct drm_nouveau_gem_pushbuf {
 	uint32_t channel;
-	uint32_t nr_dwords;
 	uint32_t nr_buffers;
-	uint32_t nr_relocs;
-	uint64_t dwords;
 	uint64_t buffers;
-	uint64_t relocs;
-};
-
-struct drm_nouveau_gem_pushbuf_call {
-	uint32_t channel;
-	uint32_t handle;
-	uint32_t offset;
-	uint32_t nr_buffers;
 	uint32_t nr_relocs;
-	uint32_t nr_dwords;
-	uint64_t buffers;
+	uint32_t nr_push;
 	uint64_t relocs;
+	uint64_t push;
 	uint32_t suffix0;
 	uint32_t suffix1;
-	/* below only accessed for CALL2 */
 	uint64_t vram_available;
 	uint64_t gart_available;
 };
 
-struct drm_nouveau_gem_pin {
-	uint32_t handle;
-	uint32_t domain;
-	uint64_t offset;
-};
-
-struct drm_nouveau_gem_unpin {
-	uint32_t handle;
-};
-
 #define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
 #define NOUVEAU_GEM_CPU_PREP_NOBLOCK                                 0x00000002
 #define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
@@ -183,14 +174,6 @@ struct drm_nouveau_gem_cpu_fini {
 	uint32_t handle;
 };
 
-struct drm_nouveau_gem_tile {
-	uint32_t handle;
-	uint32_t offset;
-	uint32_t size;
-	uint32_t tile_mode;
-	uint32_t tile_flags;
-};
-
 enum nouveau_bus_type {
 	NV_AGP     = 0,
 	NV_PCI     = 1,
@@ -200,22 +183,17 @@ enum nouveau_bus_type {
 struct drm_nouveau_sarea {
 };
 
-#define DRM_NOUVEAU_CARD_INIT          0x00
-#define DRM_NOUVEAU_GETPARAM           0x01
-#define DRM_NOUVEAU_SETPARAM           0x02
-#define DRM_NOUVEAU_CHANNEL_ALLOC      0x03
-#define DRM_NOUVEAU_CHANNEL_FREE       0x04
-#define DRM_NOUVEAU_GROBJ_ALLOC        0x05
-#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x06
-#define DRM_NOUVEAU_GPUOBJ_FREE        0x07
+#define DRM_NOUVEAU_GETPARAM           0x00
+#define DRM_NOUVEAU_SETPARAM           0x01
+#define DRM_NOUVEAU_CHANNEL_ALLOC      0x02
+#define DRM_NOUVEAU_CHANNEL_FREE       0x03
+#define DRM_NOUVEAU_GROBJ_ALLOC        0x04
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x05
+#define DRM_NOUVEAU_GPUOBJ_FREE        0x06
 #define DRM_NOUVEAU_GEM_NEW            0x40
 #define DRM_NOUVEAU_GEM_PUSHBUF        0x41
-#define DRM_NOUVEAU_GEM_PUSHBUF_CALL   0x42
-#define DRM_NOUVEAU_GEM_PIN            0x43 /* !KMS only */
-#define DRM_NOUVEAU_GEM_UNPIN          0x44 /* !KMS only */
-#define DRM_NOUVEAU_GEM_CPU_PREP       0x45
-#define DRM_NOUVEAU_GEM_CPU_FINI       0x46
-#define DRM_NOUVEAU_GEM_INFO           0x47
-#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2  0x48
+#define DRM_NOUVEAU_GEM_CPU_PREP       0x42
+#define DRM_NOUVEAU_GEM_CPU_FINI       0x43
+#define DRM_NOUVEAU_GEM_INFO           0x44
 
 #endif /* __NOUVEAU_DRM_H__ */
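For context, a sketch of how userspace might drive the reworked DRM_NOUVEAU_GEM_PUSHBUF ioctl defined above: one drm_nouveau_gem_pushbuf_push entry per push segment, each indexing into the buffer list. drmCommandWriteRead() is the generic libdrm command wrapper; the channel and handle values are placeholders, and error handling is elided:

#include <stdint.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include <drm/nouveau_drm.h>

/* Sketch: submit one push segment from a single BO on channel 'chan_id'.
 * 'bo_handle' is a placeholder GEM handle. On return the kernel has
 * updated bo.presumed and the req.*_available fields. */
static int
submit_push(int fd, int chan_id, uint32_t bo_handle,
	    uint64_t offset, uint64_t length_bytes)
{
	struct drm_nouveau_gem_pushbuf_bo bo = {
		.handle        = bo_handle,
		.valid_domains = NOUVEAU_GEM_DOMAIN_GART,
		.read_domains  = NOUVEAU_GEM_DOMAIN_GART,
	};
	struct drm_nouveau_gem_pushbuf_push push = {
		.bo_index = 0,		/* index into the 'buffers' array */
		.offset   = offset,	/* byte offset within the BO */
		.length   = length_bytes,
	};
	struct drm_nouveau_gem_pushbuf req = {
		.channel    = chan_id,
		.nr_buffers = 1,
		.buffers    = (uintptr_t)&bo,
		.nr_push    = 1,
		.push       = (uintptr_t)&push,
	};

	return drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_PUSHBUF,
				   &req, sizeof(req));
}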