author     Ben Skeggs <bskeggs@redhat.com>    2012-03-15 22:40:17 -0400
committer  Ben Skeggs <bskeggs@redhat.com>    2012-03-22 03:17:26 -0400
commit     accf94969f226ddfe7dd3a6a76ce093ace839b26 (patch)
tree       d376e4f4240ed414388d19aaa1fd1df3d1a2e54e /drivers/gpu
parent     2f5394c3ed573de2ab18cdac503b8045cd16ac5e (diff)
drm/nouveau/ttm: always do buffer moves on kernel channel
There were once good reasons for wanting the drm to be able to use M2MF etc
on user channels, but they're not relevant anymore. For the general buffer
move case, we've already lost by transferring between vram/sysmem, so the
context switching overhead is minimal in comparison.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
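For illustration only, a minimal userspace sketch of the locking shape this patch settles on: every buffer move now serializes unconditionally on the single kernel-channel mutex, instead of branching on whether the bo carried a user channel. The names here (kernel_chan, move_buffer) are hypothetical pthread-based stand-ins, not nouveau code.

/* Hypothetical userspace analogy of the post-patch locking shape; not nouveau code. */
#include <pthread.h>
#include <stdio.h>

struct channel {
	pthread_mutex_t mutex;
	const char *name;
};

/* The single "kernel channel" that all moves now run on (stand-in). */
static struct channel kernel_chan = {
	.mutex = PTHREAD_MUTEX_INITIALIZER,
	.name  = "kernel",
};

static int move_buffer(int bo_id)
{
	/* Post-patch shape: take the kernel channel's lock unconditionally,
	 * do the copy, drop the lock.  No "which channel owns this bo?" branch.
	 */
	pthread_mutex_lock(&kernel_chan.mutex);

	/* ... M2MF copy + fencing would happen here in the real driver ... */
	printf("bo %d moved on %s channel\n", bo_id, kernel_chan.name);

	pthread_mutex_unlock(&kernel_chan.mutex);
	return 0;
}

int main(void)
{
	return move_buffer(1);
}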
Diffstat (limited to 'drivers/gpu')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_bo.c   | 11
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_drv.h  |  2
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_gem.c  | 10
3 files changed, 4 insertions, 19 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ec54364ac828..7d15a774f9c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -693,16 +693,12 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		     struct ttm_mem_reg *new_mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_channel *chan = chan = dev_priv->channel;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct nouveau_channel *chan;
 	int ret;
 
-	chan = nvbo->channel;
-	if (!chan) {
-		chan = dev_priv->channel;
-		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
-	}
+	mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 
 	/* create temporary vmas for the transfer and attach them to the
 	 * old nouveau_mem node, these will get cleaned up after ttm has
@@ -734,8 +730,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	}
 
 out:
-	if (chan == dev_priv->channel)
-		mutex_unlock(&chan->mutex);
+	mutex_unlock(&chan->mutex);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index a184ba331273..0df21752d274 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -113,8 +113,6 @@ struct nouveau_bo {
 	int pbbo_index;
 	bool validate_mapped;
 
-	struct nouveau_channel *channel;
-
 	struct list_head vma_list;
 	unsigned page_shift;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7ce3fde40743..ed52a6f41613 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -426,9 +426,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			return ret;
 		}
 
-		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
 		ret = nouveau_bo_validate(nvbo, true, false, false);
-		nvbo->channel = NULL;
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
 				NV_ERROR(dev, "fail ttm_validate\n");
@@ -678,19 +676,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		return PTR_ERR(bo);
 	}
 
-	/* Mark push buffers as being used on PFIFO, the validation code
-	 * will then make sure that if the pushbuf bo moves, that they
-	 * happen on the kernel channel, which will in turn cause a sync
-	 * to happen before we try and submit the push buffer.
-	 */
+	/* Ensure all push buffers are on validate list */
 	for (i = 0; i < req->nr_push; i++) {
 		if (push[i].bo_index >= req->nr_buffers) {
 			NV_ERROR(dev, "push %d buffer not in list\n", i);
 			ret = -EINVAL;
 			goto out_prevalid;
 		}
-
-		bo[push[i].bo_index].read_domains |= (1 << 31);
 	}
 
 	/* Validate buffer list */