author     Ben Skeggs <bskeggs@redhat.com>   2012-07-19 18:17:34 -0400
committer  Ben Skeggs <bskeggs@redhat.com>   2012-10-02 23:12:56 -0400
commit     ebb945a94bba2ce8dff7b0942ff2b3f2a52a0a69
tree       07cad59be501458e6ae1304b7c0352e322ac3387  /drivers/gpu/drm/nouveau/nv50_fence.c
parent     ac1499d9573f4aadd1d2beac11fe23af8ce90c24
drm/nouveau: port all engines to new engine module format
This is a HUGE commit, but it's not nearly as bad as it looks - any problems
can be isolated to a particular chipset and engine combination. It was
simply too difficult to port each one at a time; the compat layers are
*already* ridiculous.
Most of the changes here are simply to the glue; the process for each of the
engine modules was to start with a standard skeleton and copy+paste the old
code into the appropriate places, fixing up variable names etc. as needed.
v2: Marcin Slusarz <marcin.slusarz@gmail.com>
- fix find/replace bug in license header
v3: Ben Skeggs <bskeggs@redhat.com>
- bump indirect pushbuf size to 8KiB; 4KiB was barely enough for userspace and
left no space for the kernel's requirements during GEM pushbuf submission.
- fix duplicate assignments noticed by clang
v4: Marcin Slusarz <marcin.slusarz@gmail.com>
- add sparse annotations to nv04_fifo_pause/nv04_fifo_start
- use ioread32_native/iowrite32_native for fifo control registers
v5: Ben Skeggs <bskeggs@redhat.com>
- rebase on v3.6-rc4, modified to keep copy engine fix intact
- nv10/fence: unmap fence bo before destroying
- fixed fermi regression when using nvidia gr fuc
- fixed typo in supported dma_mask checking
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nv50_fence.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 75
1 file changed, 37 insertions, 38 deletions
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 10aa04f26b8..e717aaaf62c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -22,12 +22,12 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include <core/ramht.h>
 #include "nouveau_fence.h"
-#include "nv50_display.h"
 
 struct nv50_fence_chan {
 	struct nouveau_fence_chan base;
@@ -43,12 +43,11 @@ struct nv50_fence_priv {
 static int
 nv50_fence_context_new(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nv50_fence_priv *priv = dev_priv->fence.func;
+	struct nv50_fence_priv *priv = chan->drm->fence;
 	struct nv50_fence_chan *fctx;
 	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-	struct nouveau_gpuobj *obj;
-	int ret = 0, i;
+	struct nouveau_object *object;
+	int ret, i;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (!fctx)
@@ -56,30 +55,29 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-				     mem->start * PAGE_SIZE, mem->size,
-				     NV_MEM_ACCESS_RW,
-				     NV_MEM_TARGET_VRAM, &obj);
-	if (!ret) {
-		ret = nouveau_ramht_insert(chan, NvSema, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
-	}
+	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+				 NvSema, 0x0002,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = mem->start * PAGE_SIZE,
+					.limit = mem->size - 1,
+				 }, sizeof(struct nv_dma_class),
+				 &object);
 
 	/* dma objects for display sync channel semaphore blocks */
-	for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
-		struct nv50_display *pdisp = nv50_display(chan->dev);
-		struct nv50_display_crtc *dispc = &pdisp->crtc[i];
-		struct nouveau_gpuobj *obj = NULL;
-
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     dispc->sem.bo->bo.offset, 0x1000,
-					     NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &obj);
-		if (ret)
-			break;
-
-		ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50sema(chan->drm->dev, i);
+
+		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+					 NvEvoSema0 + i, 0x003d,
+					 &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = bo->bo.offset,
+						.limit = bo->bo.offset + 0xfff,
+					 }, sizeof(struct nv_dma_class),
+					 &object);
 	}
 
 	if (ret)
@@ -88,13 +86,12 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 }
 
 int
-nv50_fence_create(struct drm_device *dev)
+nv50_fence_create(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_fence_priv *priv;
 	int ret = 0;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
@@ -104,10 +101,9 @@ nv50_fence_create(struct drm_device *dev)
 	priv->base.emit = nv10_fence_emit;
 	priv->base.read = nv10_fence_read;
 	priv->base.sync = nv17_fence_sync;
-	dev_priv->fence.func = &priv->base;
 	spin_lock_init(&priv->lock);
 
-	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -117,9 +113,12 @@ nv50_fence_create(struct drm_device *dev)
 		nouveau_bo_ref(NULL, &priv->bo);
 	}
 
-	if (ret == 0)
+	if (ret == 0) {
 		nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
-	else
-		nv10_fence_destroy(dev);
+		priv->base.sync = nv17_fence_sync;
+	}
+
+	if (ret)
+		nv10_fence_destroy(drm);
 	return ret;
 }
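
The substance of the port is easiest to see in the context-creation hunk above: the old nouveau_gpuobj_dma_new() + nouveau_ramht_insert() two-step (plus the manual nouveau_gpuobj_ref(NULL, &obj) drop) becomes a single nouveau_object_new() call that takes an nv_dma_class descriptor for the VRAM window. Below is a minimal sketch of that new-style pattern pulled out into a hypothetical helper for readability; the helper name and its parameters are illustrative, everything else is taken from the patch.

/* Illustrative sketch only, not part of the commit: wraps the new-style
 * DMA-object creation used in nv50_fence_context_new() above.  A single
 * nouveau_object_new() call both builds the DMA object and binds it to the
 * channel under 'handle', replacing the old create + RAMHT-insert +
 * unreference sequence.
 */
static int
nv50_fence_dma_new(struct nouveau_channel *chan, u32 handle, u16 oclass,
		   u64 start, u64 limit, struct nouveau_object **pobject)
{
	return nouveau_object_new(nv_object(chan->cli), chan->handle,
				  handle, oclass,
				  &(struct nv_dma_class) {
					.flags = NV_DMA_TARGET_VRAM |
						 NV_DMA_ACCESS_RDWR,
					.start = start,
					.limit = limit,
				  }, sizeof(struct nv_dma_class),
				  pobject);
}

With such a helper, the two call sites in the hunk would each reduce to one call: the fence buffer as nv50_fence_dma_new(chan, NvSema, 0x0002, mem->start * PAGE_SIZE, mem->size - 1, &object), and each display semaphore block as nv50_fence_dma_new(chan, NvEvoSema0 + i, 0x003d, bo->bo.offset, bo->bo.offset + 0xfff, &object). The !ret term in the per-CRTC loop condition is what stops further allocations after the first failure and lets the shared error path at the end of the function handle cleanup.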