-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c    |  2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c      |  7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c      |  7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c  |  2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/base.c       | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/base.c     |  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c              |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c                 | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c               | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c            | 78
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h                |  7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c                | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h                |  1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c              |  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c                |  9
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c                 |  6
16 files changed, 105 insertions(+), 86 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 606598f226fc..8d06eef2b9ee 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -256,7 +256,7 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index e21453a94971..9ac94d4e5646 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -494,13 +494,6 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
 	u32 mthd = (addr & 0x00003ffc);
 	u32 show = stat;
 
-	if (stat & 0x00200000) {
-		if (mthd == 0x0054) {
-			if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
-				show &= ~0x00200000;
-		}
-	}
-
 	if (stat & 0x00800000) {
 		if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
 			show &= ~0x00800000;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index fcd449e5aba7..04f412922d2d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -481,13 +481,6 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
 	u32 mthd = (addr & 0x00003ffc);
 	u32 show = stat;
 
-	if (stat & 0x00200000) {
-		if (mthd == 0x0054) {
-			if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
-				show &= ~0x00200000;
-		}
-	}
-
 	if (stat & 0x00800000) {
 		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
 			show &= ~0x00800000;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 64dca260912f..fe67415c3e17 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -1039,7 +1039,7 @@ nvc0_grctx_generate_r406800(struct nvc0_graph_priv *priv)
 			} while (!tpcnr[gpc]);
 			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
 
-			tpc_set |= 1 << ((gpc * 8) + tpc);
+			tpc_set |= 1ULL << ((gpc * 8) + tpc);
 		}
 
 		nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
index 9908f1f05a00..d4fd3bc9c66f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -32,6 +32,11 @@ nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
 	struct nouveau_subdev *subdev = nv_subdev(ppwr);
 	u32 addr;
 
+	/* wait for a free slot in the fifo */
+	addr = nv_rd32(ppwr, 0x10a4a0);
+	if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
+		return -EBUSY;
+
 	/* we currently only support a single process at a time waiting
 	 * on a synchronous reply, take the PPWR mutex and tell the
 	 * receive handler what we're waiting for
@@ -42,11 +47,6 @@ nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
 		ppwr->recv.process = process;
 	}
 
-	/* wait for a free slot in the fifo */
-	addr = nv_rd32(ppwr, 0x10a4a0);
-	if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
-		return -EBUSY;
-
 	/* acquire data segment access */
 	do {
 		nv_wr32(ppwr, 0x10a580, 0x00000001);
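For context, the two hunks above move the free-slot wait ahead of the synchronous-reply bookkeeping described in the surrounding comment, so the -EBUSY early return no longer fires after the PPWR mutex and ppwr->recv state have been set up. A generic userspace sketch of that ordering, using stand-in helpers rather than the real PPWR interfaces:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool slot_available(void) { return true; }	/* stands in for the nv_wait_ne() poll */
static void queue_message(void) { }			/* stands in for the fifo write */

static int send_message(void)
{
	if (!slot_available())		/* fail fast: nothing acquired, nothing to undo */
		return -EBUSY;

	pthread_mutex_lock(&lock);	/* only now take the lock and set up per-call state */
	queue_message();
	pthread_mutex_unlock(&lock);
	return 0;
}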
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index 21b2b3021fad..80e584a1bd1c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -117,7 +117,8 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
 		    priv->fan->bios.linear_max_temp) {
 			duty = nouveau_therm_update_linear(therm);
 		} else {
-			duty = priv->cstate;
+			if (priv->cstate)
+				duty = priv->cstate;
 			poll = false;
 		}
 		immd = false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 3621e7f23477..6828d81ed7b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -298,7 +298,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
 
-	if (device->card_type < NV_C0) {
+	if (device->card_type < NV_10) {
 		init->subchan[0].handle = 0x00000000;
 		init->subchan[0].grclass = 0x0000;
 		init->subchan[1].handle = NvSw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 949ab0cbc4ab..c0fde6b9393c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -98,12 +98,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		if (fence) {
-			/* Mark it as pending. */
-			tile->fence = fence;
-			nouveau_fence_ref(fence);
-		}
-
+		tile->fence = nouveau_fence_ref(fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
@@ -1462,14 +1457,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
+	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
 	struct nouveau_fence *old_fence = NULL;
 
-	if (likely(fence))
-		nouveau_fence_ref(fence);
-
 	spin_lock(&nvbo->bo.bdev->fence_lock);
 	old_fence = nvbo->bo.sync_obj;
-	nvbo->bo.sync_obj = fence;
+	nvbo->bo.sync_obj = new_fence;
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 
 	nouveau_fence_unref(&old_fence);
@@ -1552,7 +1545,8 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 
 	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+	else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+		 nvbo->page_shift == vma->vm->vmm->spg_shift) {
 		if (node->sg)
 			nouveau_vm_map_sg_table(vma, 0, size, node);
 		else
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e84f4c32331b..cc5152be2cf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -346,22 +346,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 		OUT_RING(chan, 0x00000000);
 
-	/* allocate software object class (used for fences on <= nv05, and
-	 * to signal flip completion), bind it to a subchannel.
-	 */
-	if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
+	/* allocate software object class (used for fences on <= nv05) */
+	if (device->card_type < NV_10) {
 		ret = nouveau_object_new(nv_object(client), chan->handle,
-					 NvSw, nouveau_abi16_swclass(chan->drm),
-					 NULL, 0, &object);
+					 NvSw, 0x006e, NULL, 0, &object);
 		if (ret)
 			return ret;
 
 		swch = (void *)object->parent;
 		swch->flip = nouveau_flip_complete;
 		swch->flip_data = chan;
-	}
 
-	if (device->card_type < NV_C0) {
 		ret = RING_SPACE(chan, 2);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 44642d9094e6..7809d92183c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,7 +26,6 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/ttm/ttm_execbuf_util.h>
 
 #include "nouveau_fbcon.h"
 #include "dispnv04/hw.h"
@@ -399,6 +398,11 @@ nouveau_display_create(struct drm_device *dev)
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
 
+	if (nv_device(drm->device)->chipset < 0x11)
+		dev->mode_config.async_page_flip = false;
+	else
+		dev->mode_config.async_page_flip = true;
+
 	drm_kms_helper_poll_init(dev);
 	drm_kms_helper_poll_disable(dev);
 
@@ -555,19 +559,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 		goto fail;
 
 	/* Emit the pageflip */
-	ret = RING_SPACE(chan, 3);
+	ret = RING_SPACE(chan, 2);
 	if (ret)
 		goto fail;
 
-	if (nv_device(drm->device)->card_type < NV_C0) {
+	if (nv_device(drm->device)->card_type < NV_C0)
 		BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
-		OUT_RING (chan, 0x00000000);
-		OUT_RING (chan, 0x00000000);
-	} else {
-		BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-		OUT_RING (chan, 0);
-		BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
-	}
+	else
+		BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+	OUT_RING (chan, 0x00000000);
 	FIRE_RING (chan);
 
 	ret = nouveau_fence_new(chan, false, pfence);
@@ -584,22 +584,16 @@ fail:
 
 int
 nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct drm_pending_vblank_event *event,
-		       uint32_t page_flip_flags)
+		       struct drm_pending_vblank_event *event, u32 flags)
 {
+	const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
 	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
 	struct nouveau_page_flip_state *s;
-	struct nouveau_channel *chan = NULL;
+	struct nouveau_channel *chan = drm->channel;
 	struct nouveau_fence *fence;
-	struct ttm_validate_buffer resv[2] = {
-		{ .bo = &old_bo->bo },
-		{ .bo = &new_bo->bo },
-	};
-	struct ww_acquire_ctx ticket;
-	LIST_HEAD(res);
 	int ret;
 
 	if (!drm->channel)
@@ -609,26 +603,22 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	if (!s)
 		return -ENOMEM;
 
-	/* Choose the channel the flip will be handled in */
-	spin_lock(&old_bo->bo.bdev->fence_lock);
-	fence = new_bo->bo.sync_obj;
-	if (fence)
-		chan = fence->channel;
-	if (!chan)
-		chan = drm->channel;
-	spin_unlock(&old_bo->bo.bdev->fence_lock);
+	/* synchronise rendering channel with the kernel's channel */
+	spin_lock(&new_bo->bo.bdev->fence_lock);
+	fence = nouveau_fence_ref(new_bo->bo.sync_obj);
+	spin_unlock(&new_bo->bo.bdev->fence_lock);
+	ret = nouveau_fence_sync(fence, chan);
+	if (ret)
+		return ret;
 
 	if (new_bo != old_bo) {
 		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
 		if (ret)
 			goto fail_free;
-
-		list_add(&resv[1].head, &res);
 	}
-	list_add(&resv[0].head, &res);
 
 	mutex_lock(&chan->cli->mutex);
-	ret = ttm_eu_reserve_buffers(&ticket, &res);
+	ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
 	if (ret)
 		goto fail_unpin;
 
@@ -640,12 +630,29 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	/* Emit a page flip */
 	if (nv_device(drm->device)->card_type >= NV_50) {
-		ret = nv50_display_flip_next(crtc, fb, chan, 0);
+		ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
 		if (ret)
 			goto fail_unreserve;
 	} else {
 		struct nv04_display *dispnv04 = nv04_display(dev);
-		nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
+		int head = nouveau_crtc(crtc)->index;
+
+		if (swap_interval) {
+			ret = RING_SPACE(chan, 8);
+			if (ret)
+				goto fail_unreserve;
+
+			BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+			OUT_RING (chan, 0);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+			OUT_RING (chan, head);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+			OUT_RING (chan, 0);
+			BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+			OUT_RING (chan, 0);
+		}
+
+		nouveau_bo_ref(new_bo, &dispnv04->image[head]);
 	}
 
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -656,14 +663,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	/* Update the crtc struct and cleanup */
 	crtc->fb = fb;
 
-	ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+	nouveau_bo_fence(old_bo, fence);
+	ttm_bo_unreserve(&old_bo->bo);
 	if (old_bo != new_bo)
 		nouveau_bo_unpin(old_bo);
 	nouveau_fence_unref(&fence);
 	return 0;
 
 fail_unreserve:
-	ttm_eu_backoff_reservation(&ticket, &res);
+	ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
 	mutex_unlock(&chan->cli->mutex);
 	if (old_bo != new_bo)
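For context, the reworked nouveau_crtc_page_flip() above derives swap_interval from DRM_MODE_PAGE_FLIP_ASYNC, and the earlier hunk in this file only advertises async_page_flip on chipsets >= 0x11. A minimal userspace-side sketch of how a client would request either behaviour through libdrm, assuming the DRM fd, crtc_id and fb_id were set up elsewhere:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Queue a flip of fb_id onto crtc_id; completion is reported via a DRM event. */
static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
		      void *user_data, int async)
{
	uint32_t flags = DRM_MODE_PAGE_FLIP_EVENT;

	if (async)	/* maps to swap_interval == 0 in the kernel hunk above */
		flags |= DRM_MODE_PAGE_FLIP_ASYNC;

	return drmModePageFlip(fd, crtc_id, fb_id, flags, user_data);
}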
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 690d5930ce32..984004d66a6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -51,9 +51,11 @@ enum {
 	NvSubCtxSurf2D = 0,
 	NvSubSw = 1,
 	NvSubImageBlit = 2,
-	NvSub2D = 3,
 	NvSubGdiRect = 3,
-	NvSubCopy = 4,
+
+	NvSub2D = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+	NvSubCopy = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
+	FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
 };
 
 /* Object handles. */
@@ -194,7 +196,6 @@ WIND_RING(struct nouveau_channel *chan)
 #define NV84_SUBCHAN_UEVENT 0x00000020
 #define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
 #define NV10_SUBCHAN_REF_CNT 0x00000050
-#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
 #define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
 #define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
 #define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2418b0de589e..7a3759f1c41a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,6 +37,7 @@
 #include <engine/device.h>
 #include <engine/disp.h>
 #include <engine/fifo.h>
+#include <engine/software.h>
 
 #include <subdev/vm.h>
 
@@ -191,6 +192,32 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		return;
 	}
 
+	ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
+				 nouveau_abi16_swclass(drm), NULL, 0, &object);
+	if (ret == 0) {
+		struct nouveau_software_chan *swch = (void *)object->parent;
+		ret = RING_SPACE(drm->channel, 2);
+		if (ret == 0) {
+			if (device->card_type < NV_C0) {
+				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
+				OUT_RING (drm->channel, NVDRM_NVSW);
+			} else
+			if (device->card_type < NV_E0) {
+				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
+				OUT_RING (drm->channel, 0x001f0000);
+			}
+		}
+		swch = (void *)object->parent;
+		swch->flip = nouveau_flip_complete;
+		swch->flip_data = drm->channel;
+	}
+
+	if (ret) {
+		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
+		nouveau_accel_fini(drm);
+		return;
+	}
+
 	if (device->card_type < NV_C0) {
 		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
 					 &drm->notify);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 71ed2dadae61..4b0fb6c66be9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -56,6 +56,7 @@ enum nouveau_drm_handle {
 	NVDRM_CONTROL = 0xdddddddc,
 	NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
 	NVDRM_CHAN = 0xcccc0000, /* |= client chid */
+	NVDRM_NVSW = 0x55550000,
 };
 
 struct nouveau_cli {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 34b82711e7c8..40cf52e6d6d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -306,7 +306,8 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
 struct nouveau_fence *
 nouveau_fence_ref(struct nouveau_fence *fence)
 {
-	kref_get(&fence->kref);
+	if (fence)
+		kref_get(&fence->kref);
 	return fence;
 }
 
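For context, pushing the NULL check into nouveau_fence_ref() above is what lets the nouveau_bo.c and nouveau_gem.c hunks in this series drop their own "if (sync_obj)" guards around the ref. A small userspace model of the pattern, with stand-in types rather than the kernel ones:

struct kref { int refcount; };
static void kref_get(struct kref *kref) { kref->refcount++; }

struct fence { struct kref kref; };

/* NULL-tolerant ref: callers may pass a possibly-NULL sync object directly */
static struct fence *fence_ref(struct fence *fence)
{
	if (fence)
		kref_get(&fence->kref);
	return fence;
}

/* caller side: no guard needed before taking the reference */
static struct fence *grab_sync_obj(struct fence *sync_obj)
{
	return fence_ref(sync_obj);	/* NULL in, NULL out */
}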
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 418a6177a653..78a27f8ad7d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -106,8 +106,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 
 	if (mapped) {
 		spin_lock(&nvbo->bo.bdev->fence_lock);
-		if (nvbo->bo.sync_obj)
-			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 		spin_unlock(&nvbo->bo.bdev->fence_lock);
 	}
 
@@ -309,7 +308,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
 
-		nouveau_bo_fence(nvbo, fence);
+		if (likely(fence))
+			nouveau_bo_fence(nvbo, fence);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -438,8 +438,7 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
 	int ret = 0;
 
 	spin_lock(&nvbo->bo.bdev->fence_lock);
-	if (nvbo->bo.sync_obj)
-		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 
 	if (fence) {
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 77dcc9c50777..8fe32bbed99a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -255,6 +255,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, NvCtxSurf2D);
 	BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
 	OUT_RING(chan, 3);
+	if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
+		BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
+		OUT_RING(chan, 0);
+		OUT_RING(chan, 1);
+		OUT_RING(chan, 2);
+	}
 
 	BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
 	OUT_RING(chan, NvGdiRect);
