author | Ben Skeggs <bskeggs@redhat.com> | 2010-09-01 01:24:31 -0400
committer | Ben Skeggs <bskeggs@redhat.com> | 2010-09-24 02:20:14 -0400
commit | a8eaebc6c52bb0cd243b4cb421068f42d378be9c
tree | 12f796e5210d51f78b9fc6ddd4750cf1421373c2 /drivers/gpu/drm/nouveau
parent | de3a6c0a3b642c0c350414d63298a1b19a009290
drm/nouveau: remove nouveau_gpuobj_ref completely, replace with sanity
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau')
21 files changed, 459 insertions, 583 deletions
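The core of the patch is visible in the nouveau_dma.c and nouveau_object.c hunks below: struct nouveau_gpuobj_ref and its helpers (nouveau_gpuobj_ref_add/ref_del/new_ref/ref_find) are removed, gpuobjs carry their own refcount managed through the new two-argument nouveau_gpuobj_ref(), and RAMHT bookkeeping moves behind nouveau_ramht_insert()/find()/remove(). Below is a condensed sketch of the caller-side idiom, drawn from the nouveau_dma.c hunk; the example_ function name is illustrative and not part of the commit.

```c
/* Sketch of the new caller idiom; mirrors the nouveau_dma.c hunk below. */
static int example_bind_m2mf(struct nouveau_channel *chan)
{
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_gr_new(chan, 0x5039, &obj);  /* allocation holds one reference */
	if (ret)
		return ret;

	ret = nouveau_ramht_insert(chan, NvM2MF, obj);    /* hash table takes its own reference */
	nouveau_gpuobj_ref(NULL, &obj);                   /* drop the local reference either way */
	return ret;
}
```

The same create/insert/unref sequence replaces the old nouveau_gpuobj_ref_add() calls throughout the driver.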
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 3144ddea593e..e01396747f6f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -70,14 +70,8 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) | |||
70 | chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; | 70 | chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; |
71 | } | 71 | } |
72 | 72 | ||
73 | ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); | 73 | nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); |
74 | if (ret) { | 74 | nouveau_gpuobj_ref(NULL, &pushbuf); |
75 | NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret); | ||
76 | if (pushbuf != dev_priv->gart_info.sg_ctxdma) | ||
77 | nouveau_gpuobj_del(dev, &pushbuf); | ||
78 | return ret; | ||
79 | } | ||
80 | |||
81 | return 0; | 75 | return 0; |
82 | } | 76 | } |
83 | 77 | ||
@@ -308,7 +302,7 @@ nouveau_channel_free(struct nouveau_channel *chan) | |||
308 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 302 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
309 | 303 | ||
310 | /* Release the channel's resources */ | 304 | /* Release the channel's resources */ |
311 | nouveau_gpuobj_ref_del(dev, &chan->pushbuf); | 305 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); |
312 | if (chan->pushbuf_bo) { | 306 | if (chan->pushbuf_bo) { |
313 | nouveau_bo_unmap(chan->pushbuf_bo); | 307 | nouveau_bo_unmap(chan->pushbuf_bo); |
314 | nouveau_bo_unpin(chan->pushbuf_bo); | 308 | nouveau_bo_unpin(chan->pushbuf_bo); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 2d006993378a..9d27acda87e2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_dma.h" | 30 | #include "nouveau_dma.h" |
31 | #include "nouveau_ramht.h" | ||
31 | 32 | ||
32 | void | 33 | void |
33 | nouveau_dma_pre_init(struct nouveau_channel *chan) | 34 | nouveau_dma_pre_init(struct nouveau_channel *chan) |
@@ -58,26 +59,27 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
58 | { | 59 | { |
59 | struct drm_device *dev = chan->dev; | 60 | struct drm_device *dev = chan->dev; |
60 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 61 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
61 | struct nouveau_gpuobj *m2mf = NULL; | 62 | struct nouveau_gpuobj *obj = NULL; |
62 | struct nouveau_gpuobj *nvsw = NULL; | ||
63 | int ret, i; | 63 | int ret, i; |
64 | 64 | ||
65 | /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ | 65 | /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ |
66 | ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? | 66 | ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? |
67 | 0x0039 : 0x5039, &m2mf); | 67 | 0x0039 : 0x5039, &obj); |
68 | if (ret) | 68 | if (ret) |
69 | return ret; | 69 | return ret; |
70 | 70 | ||
71 | ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL); | 71 | ret = nouveau_ramht_insert(chan, NvM2MF, obj); |
72 | nouveau_gpuobj_ref(NULL, &obj); | ||
72 | if (ret) | 73 | if (ret) |
73 | return ret; | 74 | return ret; |
74 | 75 | ||
75 | /* Create an NV_SW object for various sync purposes */ | 76 | /* Create an NV_SW object for various sync purposes */ |
76 | ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw); | 77 | ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); |
77 | if (ret) | 78 | if (ret) |
78 | return ret; | 79 | return ret; |
79 | 80 | ||
80 | ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL); | 81 | ret = nouveau_ramht_insert(chan, NvSw, obj); |
82 | nouveau_gpuobj_ref(NULL, &obj); | ||
81 | if (ret) | 83 | if (ret) |
82 | return ret; | 84 | return ret; |
83 | 85 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b8511c122f5f..bf89d0297e21 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -133,7 +133,6 @@ enum nouveau_flags { | |||
133 | #define NVOBJ_ENGINE_DISPLAY 2 | 133 | #define NVOBJ_ENGINE_DISPLAY 2 |
134 | #define NVOBJ_ENGINE_INT 0xdeadbeef | 134 | #define NVOBJ_ENGINE_INT 0xdeadbeef |
135 | 135 | ||
136 | #define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0) | ||
137 | #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) | 136 | #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) |
138 | #define NVOBJ_FLAG_ZERO_FREE (1 << 2) | 137 | #define NVOBJ_FLAG_ZERO_FREE (1 << 2) |
139 | #define NVOBJ_FLAG_FAKE (1 << 3) | 138 | #define NVOBJ_FLAG_FAKE (1 << 3) |
@@ -141,7 +140,6 @@ struct nouveau_gpuobj { | |||
141 | struct drm_device *dev; | 140 | struct drm_device *dev; |
142 | struct list_head list; | 141 | struct list_head list; |
143 | 142 | ||
144 | struct nouveau_channel *im_channel; | ||
145 | struct drm_mm_node *im_pramin; | 143 | struct drm_mm_node *im_pramin; |
146 | struct nouveau_bo *im_backing; | 144 | struct nouveau_bo *im_backing; |
147 | uint32_t im_backing_start; | 145 | uint32_t im_backing_start; |
@@ -162,16 +160,6 @@ struct nouveau_gpuobj { | |||
162 | void *priv; | 160 | void *priv; |
163 | }; | 161 | }; |
164 | 162 | ||
165 | struct nouveau_gpuobj_ref { | ||
166 | struct list_head list; | ||
167 | |||
168 | struct nouveau_gpuobj *gpuobj; | ||
169 | uint32_t instance; | ||
170 | |||
171 | struct nouveau_channel *channel; | ||
172 | int handle; | ||
173 | }; | ||
174 | |||
175 | struct nouveau_channel { | 163 | struct nouveau_channel { |
176 | struct drm_device *dev; | 164 | struct drm_device *dev; |
177 | int id; | 165 | int id; |
@@ -197,33 +185,32 @@ struct nouveau_channel { | |||
197 | } fence; | 185 | } fence; |
198 | 186 | ||
199 | /* DMA push buffer */ | 187 | /* DMA push buffer */ |
200 | struct nouveau_gpuobj_ref *pushbuf; | 188 | struct nouveau_gpuobj *pushbuf; |
201 | struct nouveau_bo *pushbuf_bo; | 189 | struct nouveau_bo *pushbuf_bo; |
202 | uint32_t pushbuf_base; | 190 | uint32_t pushbuf_base; |
203 | 191 | ||
204 | /* Notifier memory */ | 192 | /* Notifier memory */ |
205 | struct nouveau_bo *notifier_bo; | 193 | struct nouveau_bo *notifier_bo; |
206 | struct drm_mm notifier_heap; | 194 | struct drm_mm notifier_heap; |
207 | 195 | ||
208 | /* PFIFO context */ | 196 | /* PFIFO context */ |
209 | struct nouveau_gpuobj_ref *ramfc; | 197 | struct nouveau_gpuobj *ramfc; |
210 | struct nouveau_gpuobj_ref *cache; | 198 | struct nouveau_gpuobj *cache; |
211 | 199 | ||
212 | /* PGRAPH context */ | 200 | /* PGRAPH context */ |
213 | /* XXX may be merge 2 pointers as private data ??? */ | 201 | /* XXX may be merge 2 pointers as private data ??? */ |
214 | struct nouveau_gpuobj_ref *ramin_grctx; | 202 | struct nouveau_gpuobj *ramin_grctx; |
215 | void *pgraph_ctx; | 203 | void *pgraph_ctx; |
216 | 204 | ||
217 | /* NV50 VM */ | 205 | /* NV50 VM */ |
218 | struct nouveau_gpuobj *vm_pd; | 206 | struct nouveau_gpuobj *vm_pd; |
219 | struct nouveau_gpuobj_ref *vm_gart_pt; | 207 | struct nouveau_gpuobj *vm_gart_pt; |
220 | struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR]; | 208 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; |
221 | 209 | ||
222 | /* Objects */ | 210 | /* Objects */ |
223 | struct nouveau_gpuobj_ref *ramin; /* Private instmem */ | 211 | struct nouveau_gpuobj *ramin; /* Private instmem */ |
224 | struct drm_mm ramin_heap; /* Private PRAMIN heap */ | 212 | struct drm_mm ramin_heap; /* Private PRAMIN heap */ |
225 | struct nouveau_gpuobj_ref *ramht; /* Hash table */ | 213 | struct nouveau_ramht *ramht; /* Hash table */ |
226 | struct list_head ramht_refs; /* Objects referenced by RAMHT */ | ||
227 | 214 | ||
228 | /* GPU object info for stuff used in-kernel (mm_enabled) */ | 215 | /* GPU object info for stuff used in-kernel (mm_enabled) */ |
229 | uint32_t m2mf_ntfy; | 216 | uint32_t m2mf_ntfy; |
@@ -301,7 +288,7 @@ struct nouveau_fb_engine { | |||
301 | struct nouveau_fifo_engine { | 288 | struct nouveau_fifo_engine { |
302 | int channels; | 289 | int channels; |
303 | 290 | ||
304 | struct nouveau_gpuobj_ref *playlist[2]; | 291 | struct nouveau_gpuobj *playlist[2]; |
305 | int cur_playlist; | 292 | int cur_playlist; |
306 | 293 | ||
307 | int (*init)(struct drm_device *); | 294 | int (*init)(struct drm_device *); |
@@ -339,7 +326,7 @@ struct nouveau_pgraph_engine { | |||
339 | int grctx_size; | 326 | int grctx_size; |
340 | 327 | ||
341 | /* NV2x/NV3x context table (0x400780) */ | 328 | /* NV2x/NV3x context table (0x400780) */ |
342 | struct nouveau_gpuobj_ref *ctx_table; | 329 | struct nouveau_gpuobj *ctx_table; |
343 | 330 | ||
344 | int (*init)(struct drm_device *); | 331 | int (*init)(struct drm_device *); |
345 | void (*takedown)(struct drm_device *); | 332 | void (*takedown)(struct drm_device *); |
@@ -555,7 +542,7 @@ struct drm_nouveau_private { | |||
555 | spinlock_t context_switch_lock; | 542 | spinlock_t context_switch_lock; |
556 | 543 | ||
557 | /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ | 544 | /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ |
558 | struct nouveau_gpuobj *ramht; | 545 | struct nouveau_ramht *ramht; |
559 | uint32_t ramin_rsvd_vram; | 546 | uint32_t ramin_rsvd_vram; |
560 | uint32_t ramht_offset; | 547 | uint32_t ramht_offset; |
561 | uint32_t ramht_size; | 548 | uint32_t ramht_size; |
@@ -764,24 +751,12 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); | |||
764 | extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, | 751 | extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, |
765 | uint32_t size, int align, uint32_t flags, | 752 | uint32_t size, int align, uint32_t flags, |
766 | struct nouveau_gpuobj **); | 753 | struct nouveau_gpuobj **); |
767 | extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); | 754 | extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *, |
768 | extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, | 755 | struct nouveau_gpuobj **); |
769 | uint32_t handle, struct nouveau_gpuobj *, | ||
770 | struct nouveau_gpuobj_ref **); | ||
771 | extern int nouveau_gpuobj_ref_del(struct drm_device *, | ||
772 | struct nouveau_gpuobj_ref **); | ||
773 | extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle, | ||
774 | struct nouveau_gpuobj_ref **ref_ret); | ||
775 | extern int nouveau_gpuobj_new_ref(struct drm_device *, | ||
776 | struct nouveau_channel *alloc_chan, | ||
777 | struct nouveau_channel *ref_chan, | ||
778 | uint32_t handle, uint32_t size, int align, | ||
779 | uint32_t flags, struct nouveau_gpuobj_ref **); | ||
780 | extern int nouveau_gpuobj_new_fake(struct drm_device *, | 756 | extern int nouveau_gpuobj_new_fake(struct drm_device *, |
781 | uint32_t p_offset, uint32_t b_offset, | 757 | uint32_t p_offset, uint32_t b_offset, |
782 | uint32_t size, uint32_t flags, | 758 | uint32_t size, uint32_t flags, |
783 | struct nouveau_gpuobj **, | 759 | struct nouveau_gpuobj **); |
784 | struct nouveau_gpuobj_ref**); | ||
785 | extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, | 760 | extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, |
786 | uint64_t offset, uint64_t size, int access, | 761 | uint64_t offset, uint64_t size, int access, |
787 | int target, struct nouveau_gpuobj **); | 762 | int target, struct nouveau_gpuobj **); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 794b0ee30cf6..9cc3259a54b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include "nouveau_drm.h" | 35 | #include "nouveau_drm.h" |
36 | #include "nouveau_drv.h" | 36 | #include "nouveau_drv.h" |
37 | #include "nouveau_reg.h" | 37 | #include "nouveau_reg.h" |
38 | #include "nouveau_ramht.h" | ||
38 | #include <linux/ratelimit.h> | 39 | #include <linux/ratelimit.h> |
39 | 40 | ||
40 | /* needed for hotplug irq */ | 41 | /* needed for hotplug irq */ |
@@ -106,15 +107,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data) | |||
106 | const int mthd = addr & 0x1ffc; | 107 | const int mthd = addr & 0x1ffc; |
107 | 108 | ||
108 | if (mthd == 0x0000) { | 109 | if (mthd == 0x0000) { |
109 | struct nouveau_gpuobj_ref *ref = NULL; | 110 | struct nouveau_gpuobj *gpuobj; |
110 | 111 | ||
111 | if (nouveau_gpuobj_ref_find(chan, data, &ref)) | 112 | gpuobj = nouveau_ramht_find(chan, data); |
113 | if (!gpuobj) | ||
112 | return false; | 114 | return false; |
113 | 115 | ||
114 | if (ref->gpuobj->engine != NVOBJ_ENGINE_SW) | 116 | if (gpuobj->engine != NVOBJ_ENGINE_SW) |
115 | return false; | 117 | return false; |
116 | 118 | ||
117 | chan->sw_subchannel[subc] = ref->gpuobj->class; | 119 | chan->sw_subchannel[subc] = gpuobj->class; |
118 | nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, | 120 | nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, |
119 | NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); | 121 | NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); |
120 | return true; | 122 | return true; |
@@ -357,7 +359,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev) | |||
357 | if (!chan || !chan->ramin_grctx) | 359 | if (!chan || !chan->ramin_grctx) |
358 | continue; | 360 | continue; |
359 | 361 | ||
360 | if (inst == chan->ramin_grctx->instance) | 362 | if (inst == chan->ramin_grctx->pinst) |
361 | break; | 363 | break; |
362 | } | 364 | } |
363 | } else { | 365 | } else { |
@@ -369,7 +371,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev) | |||
369 | if (!chan || !chan->ramin) | 371 | if (!chan || !chan->ramin) |
370 | continue; | 372 | continue; |
371 | 373 | ||
372 | if (inst == chan->ramin->instance) | 374 | if (inst == chan->ramin->vinst) |
373 | break; | 375 | break; |
374 | } | 376 | } |
375 | } | 377 | } |
@@ -625,7 +627,7 @@ nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name) | |||
625 | if (!chan || !chan->ramin) | 627 | if (!chan || !chan->ramin) |
626 | continue; | 628 | continue; |
627 | 629 | ||
628 | if (trap[1] == chan->ramin->instance >> 12) | 630 | if (trap[1] == chan->ramin->vinst >> 12) |
629 | break; | 631 | break; |
630 | } | 632 | } |
631 | NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n", | 633 | NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n", |
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 3ec181ff50ce..22b86189b7bb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "drm.h" | 29 | #include "drm.h" |
30 | #include "nouveau_drv.h" | 30 | #include "nouveau_drv.h" |
31 | #include "nouveau_ramht.h" | ||
31 | 32 | ||
32 | int | 33 | int |
33 | nouveau_notifier_init_channel(struct nouveau_channel *chan) | 34 | nouveau_notifier_init_channel(struct nouveau_channel *chan) |
@@ -146,11 +147,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
146 | nobj->dtor = nouveau_notifier_gpuobj_dtor; | 147 | nobj->dtor = nouveau_notifier_gpuobj_dtor; |
147 | nobj->priv = mem; | 148 | nobj->priv = mem; |
148 | 149 | ||
149 | ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); | 150 | ret = nouveau_ramht_insert(chan, handle, nobj); |
151 | nouveau_gpuobj_ref(NULL, &nobj); | ||
150 | if (ret) { | 152 | if (ret) { |
151 | nouveau_gpuobj_del(dev, &nobj); | ||
152 | drm_mm_put_block(mem); | 153 | drm_mm_put_block(mem); |
153 | NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); | 154 | NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret); |
154 | return ret; | 155 | return ret; |
155 | } | 156 | } |
156 | 157 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 552f5131650f..d55c50f1a2d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -90,7 +90,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
90 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | 90 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); |
91 | gpuobj->dev = dev; | 91 | gpuobj->dev = dev; |
92 | gpuobj->flags = flags; | 92 | gpuobj->flags = flags; |
93 | gpuobj->im_channel = chan; | 93 | gpuobj->refcount = 1; |
94 | 94 | ||
95 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | 95 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); |
96 | 96 | ||
@@ -108,7 +108,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
108 | 108 | ||
109 | ret = engine->instmem.populate(dev, gpuobj, &size); | 109 | ret = engine->instmem.populate(dev, gpuobj, &size); |
110 | if (ret) { | 110 | if (ret) { |
111 | nouveau_gpuobj_del(dev, &gpuobj); | 111 | nouveau_gpuobj_ref(NULL, &gpuobj); |
112 | return ret; | 112 | return ret; |
113 | } | 113 | } |
114 | } | 114 | } |
@@ -119,14 +119,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
119 | gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); | 119 | gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); |
120 | 120 | ||
121 | if (!gpuobj->im_pramin) { | 121 | if (!gpuobj->im_pramin) { |
122 | nouveau_gpuobj_del(dev, &gpuobj); | 122 | nouveau_gpuobj_ref(NULL, &gpuobj); |
123 | return -ENOMEM; | 123 | return -ENOMEM; |
124 | } | 124 | } |
125 | 125 | ||
126 | if (!chan) { | 126 | if (!chan) { |
127 | ret = engine->instmem.bind(dev, gpuobj); | 127 | ret = engine->instmem.bind(dev, gpuobj); |
128 | if (ret) { | 128 | if (ret) { |
129 | nouveau_gpuobj_del(dev, &gpuobj); | 129 | nouveau_gpuobj_ref(NULL, &gpuobj); |
130 | return ret; | 130 | return ret; |
131 | } | 131 | } |
132 | } | 132 | } |
@@ -134,13 +134,13 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
134 | /* calculate the various different addresses for the object */ | 134 | /* calculate the various different addresses for the object */ |
135 | if (chan) { | 135 | if (chan) { |
136 | gpuobj->pinst = gpuobj->im_pramin->start + | 136 | gpuobj->pinst = gpuobj->im_pramin->start + |
137 | chan->ramin->gpuobj->im_pramin->start; | 137 | chan->ramin->im_pramin->start; |
138 | if (dev_priv->card_type < NV_50) { | 138 | if (dev_priv->card_type < NV_50) { |
139 | gpuobj->cinst = gpuobj->pinst; | 139 | gpuobj->cinst = gpuobj->pinst; |
140 | } else { | 140 | } else { |
141 | gpuobj->cinst = gpuobj->im_pramin->start; | 141 | gpuobj->cinst = gpuobj->im_pramin->start; |
142 | gpuobj->vinst = gpuobj->im_pramin->start + | 142 | gpuobj->vinst = gpuobj->im_pramin->start + |
143 | chan->ramin->gpuobj->im_backing_start; | 143 | chan->ramin->im_backing_start; |
144 | } | 144 | } |
145 | } else { | 145 | } else { |
146 | gpuobj->pinst = gpuobj->im_pramin->start; | 146 | gpuobj->pinst = gpuobj->im_pramin->start; |
@@ -156,6 +156,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
156 | engine->instmem.flush(dev); | 156 | engine->instmem.flush(dev); |
157 | } | 157 | } |
158 | 158 | ||
159 | |||
159 | *gpuobj_ret = gpuobj; | 160 | *gpuobj_ret = gpuobj; |
160 | return 0; | 161 | return 0; |
161 | } | 162 | } |
@@ -176,20 +177,23 @@ int | |||
176 | nouveau_gpuobj_init(struct drm_device *dev) | 177 | nouveau_gpuobj_init(struct drm_device *dev) |
177 | { | 178 | { |
178 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 179 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
180 | struct nouveau_gpuobj *ramht = NULL; | ||
179 | int ret; | 181 | int ret; |
180 | 182 | ||
181 | NV_DEBUG(dev, "\n"); | 183 | NV_DEBUG(dev, "\n"); |
182 | 184 | ||
183 | if (dev_priv->card_type < NV_50) { | 185 | if (dev_priv->card_type >= NV_50) |
184 | ret = nouveau_gpuobj_new_fake(dev, | 186 | return 0; |
185 | dev_priv->ramht_offset, ~0, dev_priv->ramht_size, | ||
186 | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, | ||
187 | &dev_priv->ramht, NULL); | ||
188 | if (ret) | ||
189 | return ret; | ||
190 | } | ||
191 | 187 | ||
192 | return 0; | 188 | ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0, |
189 | dev_priv->ramht_size, | ||
190 | NVOBJ_FLAG_ZERO_ALLOC, &ramht); | ||
191 | if (ret) | ||
192 | return ret; | ||
193 | |||
194 | ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht); | ||
195 | nouveau_gpuobj_ref(NULL, &ramht); | ||
196 | return ret; | ||
193 | } | 197 | } |
194 | 198 | ||
195 | void | 199 | void |
@@ -199,7 +203,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev) | |||
199 | 203 | ||
200 | NV_DEBUG(dev, "\n"); | 204 | NV_DEBUG(dev, "\n"); |
201 | 205 | ||
202 | nouveau_gpuobj_del(dev, &dev_priv->ramht); | 206 | nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL); |
203 | } | 207 | } |
204 | 208 | ||
205 | void | 209 | void |
@@ -216,29 +220,21 @@ nouveau_gpuobj_late_takedown(struct drm_device *dev) | |||
216 | 220 | ||
217 | NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", | 221 | NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", |
218 | gpuobj, gpuobj->refcount); | 222 | gpuobj, gpuobj->refcount); |
219 | gpuobj->refcount = 0; | 223 | |
220 | nouveau_gpuobj_del(dev, &gpuobj); | 224 | gpuobj->refcount = 1; |
225 | nouveau_gpuobj_ref(NULL, &gpuobj); | ||
221 | } | 226 | } |
222 | } | 227 | } |
223 | 228 | ||
224 | int | 229 | static int |
225 | nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) | 230 | nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj) |
226 | { | 231 | { |
232 | struct drm_device *dev = gpuobj->dev; | ||
227 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 233 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
228 | struct nouveau_engine *engine = &dev_priv->engine; | 234 | struct nouveau_engine *engine = &dev_priv->engine; |
229 | struct nouveau_gpuobj *gpuobj; | ||
230 | int i; | 235 | int i; |
231 | 236 | ||
232 | NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); | 237 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); |
233 | |||
234 | if (!dev_priv || !pgpuobj || !(*pgpuobj)) | ||
235 | return -EINVAL; | ||
236 | gpuobj = *pgpuobj; | ||
237 | |||
238 | if (gpuobj->refcount != 0) { | ||
239 | NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount); | ||
240 | return -EINVAL; | ||
241 | } | ||
242 | 238 | ||
243 | if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { | 239 | if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { |
244 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) | 240 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) |
@@ -261,181 +257,26 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) | |||
261 | 257 | ||
262 | list_del(&gpuobj->list); | 258 | list_del(&gpuobj->list); |
263 | 259 | ||
264 | *pgpuobj = NULL; | ||
265 | kfree(gpuobj); | 260 | kfree(gpuobj); |
266 | return 0; | 261 | return 0; |
267 | } | 262 | } |
268 | 263 | ||
269 | static int | 264 | void |
270 | nouveau_gpuobj_instance_get(struct drm_device *dev, | 265 | nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr) |
271 | struct nouveau_channel *chan, | ||
272 | struct nouveau_gpuobj *gpuobj, uint32_t *inst) | ||
273 | { | ||
274 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
275 | struct nouveau_gpuobj *cpramin; | ||
276 | |||
277 | /* <NV50 use PRAMIN address everywhere */ | ||
278 | if (dev_priv->card_type < NV_50) { | ||
279 | *inst = gpuobj->im_pramin->start; | ||
280 | if (gpuobj->im_channel) { | ||
281 | cpramin = gpuobj->im_channel->ramin->gpuobj; | ||
282 | *inst += cpramin->im_pramin->start; | ||
283 | } | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | /* NV50 channel-local instance */ | ||
288 | if (chan) { | ||
289 | *inst = gpuobj->im_pramin->start; | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | /* NV50 global (VRAM) instance */ | ||
294 | if (!gpuobj->im_channel) { | ||
295 | /* ...from global heap */ | ||
296 | if (!gpuobj->im_backing) { | ||
297 | NV_ERROR(dev, "AII, no VRAM backing gpuobj\n"); | ||
298 | return -EINVAL; | ||
299 | } | ||
300 | *inst = gpuobj->im_backing_start; | ||
301 | return 0; | ||
302 | } else { | ||
303 | /* ...from local heap */ | ||
304 | cpramin = gpuobj->im_channel->ramin->gpuobj; | ||
305 | *inst = cpramin->im_backing_start + gpuobj->im_pramin->start; | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | return -EINVAL; | ||
310 | } | ||
311 | |||
312 | int | ||
313 | nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, | ||
314 | uint32_t handle, struct nouveau_gpuobj *gpuobj, | ||
315 | struct nouveau_gpuobj_ref **ref_ret) | ||
316 | { | ||
317 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
318 | struct nouveau_gpuobj_ref *ref; | ||
319 | uint32_t instance; | ||
320 | int ret; | ||
321 | |||
322 | NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n", | ||
323 | chan ? chan->id : -1, handle, gpuobj); | ||
324 | |||
325 | if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) | ||
326 | return -EINVAL; | ||
327 | |||
328 | if (!chan && !ref_ret) | ||
329 | return -EINVAL; | ||
330 | |||
331 | if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) { | ||
332 | /* sw object */ | ||
333 | instance = 0x40; | ||
334 | } else { | ||
335 | ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); | ||
336 | if (ret) | ||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | ||
341 | if (!ref) | ||
342 | return -ENOMEM; | ||
343 | INIT_LIST_HEAD(&ref->list); | ||
344 | ref->gpuobj = gpuobj; | ||
345 | ref->channel = chan; | ||
346 | ref->instance = instance; | ||
347 | |||
348 | if (!ref_ret) { | ||
349 | ref->handle = handle; | ||
350 | |||
351 | ret = nouveau_ramht_insert(dev, ref); | ||
352 | if (ret) { | ||
353 | kfree(ref); | ||
354 | return ret; | ||
355 | } | ||
356 | } else { | ||
357 | ref->handle = ~0; | ||
358 | *ref_ret = ref; | ||
359 | } | ||
360 | |||
361 | ref->gpuobj->refcount++; | ||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) | ||
366 | { | ||
367 | struct nouveau_gpuobj_ref *ref; | ||
368 | |||
369 | NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL); | ||
370 | |||
371 | if (!dev || !pref || *pref == NULL) | ||
372 | return -EINVAL; | ||
373 | ref = *pref; | ||
374 | |||
375 | if (ref->handle != ~0) | ||
376 | nouveau_ramht_remove(dev, ref); | ||
377 | |||
378 | if (ref->gpuobj) { | ||
379 | ref->gpuobj->refcount--; | ||
380 | |||
381 | if (ref->gpuobj->refcount == 0) { | ||
382 | if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) | ||
383 | nouveau_gpuobj_del(dev, &ref->gpuobj); | ||
384 | } | ||
385 | } | ||
386 | |||
387 | *pref = NULL; | ||
388 | kfree(ref); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | int | ||
393 | nouveau_gpuobj_new_ref(struct drm_device *dev, | ||
394 | struct nouveau_channel *oc, struct nouveau_channel *rc, | ||
395 | uint32_t handle, uint32_t size, int align, | ||
396 | uint32_t flags, struct nouveau_gpuobj_ref **ref) | ||
397 | { | 266 | { |
398 | struct nouveau_gpuobj *gpuobj = NULL; | 267 | if (ref) |
399 | int ret; | 268 | ref->refcount++; |
400 | 269 | ||
401 | ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj); | 270 | if (*ptr && --(*ptr)->refcount == 0) |
402 | if (ret) | 271 | nouveau_gpuobj_del(*ptr); |
403 | return ret; | ||
404 | 272 | ||
405 | ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref); | 273 | *ptr = ref; |
406 | if (ret) { | ||
407 | nouveau_gpuobj_del(dev, &gpuobj); | ||
408 | return ret; | ||
409 | } | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | int | ||
415 | nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, | ||
416 | struct nouveau_gpuobj_ref **ref_ret) | ||
417 | { | ||
418 | struct nouveau_gpuobj_ref *ref; | ||
419 | struct list_head *entry, *tmp; | ||
420 | |||
421 | list_for_each_safe(entry, tmp, &chan->ramht_refs) { | ||
422 | ref = list_entry(entry, struct nouveau_gpuobj_ref, list); | ||
423 | |||
424 | if (ref->handle == handle) { | ||
425 | if (ref_ret) | ||
426 | *ref_ret = ref; | ||
427 | return 0; | ||
428 | } | ||
429 | } | ||
430 | |||
431 | return -EINVAL; | ||
432 | } | 274 | } |
433 | 275 | ||
434 | int | 276 | int |
435 | nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | 277 | nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, |
436 | uint32_t b_offset, uint32_t size, | 278 | uint32_t b_offset, uint32_t size, |
437 | uint32_t flags, struct nouveau_gpuobj **pgpuobj, | 279 | uint32_t flags, struct nouveau_gpuobj **pgpuobj) |
438 | struct nouveau_gpuobj_ref **pref) | ||
439 | { | 280 | { |
440 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 281 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
441 | struct nouveau_gpuobj *gpuobj = NULL; | 282 | struct nouveau_gpuobj *gpuobj = NULL; |
@@ -450,8 +291,8 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | |||
450 | return -ENOMEM; | 291 | return -ENOMEM; |
451 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | 292 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); |
452 | gpuobj->dev = dev; | 293 | gpuobj->dev = dev; |
453 | gpuobj->im_channel = NULL; | ||
454 | gpuobj->flags = flags | NVOBJ_FLAG_FAKE; | 294 | gpuobj->flags = flags | NVOBJ_FLAG_FAKE; |
295 | gpuobj->refcount = 1; | ||
455 | 296 | ||
456 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | 297 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); |
457 | 298 | ||
@@ -459,7 +300,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | |||
459 | gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), | 300 | gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), |
460 | GFP_KERNEL); | 301 | GFP_KERNEL); |
461 | if (!gpuobj->im_pramin) { | 302 | if (!gpuobj->im_pramin) { |
462 | nouveau_gpuobj_del(dev, &gpuobj); | 303 | nouveau_gpuobj_ref(NULL, &gpuobj); |
463 | return -ENOMEM; | 304 | return -ENOMEM; |
464 | } | 305 | } |
465 | gpuobj->im_pramin->start = p_offset; | 306 | gpuobj->im_pramin->start = p_offset; |
@@ -481,14 +322,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | |||
481 | dev_priv->engine.instmem.flush(dev); | 322 | dev_priv->engine.instmem.flush(dev); |
482 | } | 323 | } |
483 | 324 | ||
484 | if (pref) { | ||
485 | i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref); | ||
486 | if (i) { | ||
487 | nouveau_gpuobj_del(dev, &gpuobj); | ||
488 | return i; | ||
489 | } | ||
490 | } | ||
491 | |||
492 | if (pgpuobj) | 325 | if (pgpuobj) |
493 | *pgpuobj = gpuobj; | 326 | *pgpuobj = gpuobj; |
494 | return 0; | 327 | return 0; |
@@ -628,7 +461,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, | |||
628 | *o_ret = 0; | 461 | *o_ret = 0; |
629 | } else | 462 | } else |
630 | if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { | 463 | if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { |
631 | *gpuobj = dev_priv->gart_info.sg_ctxdma; | 464 | nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); |
632 | if (offset & ~0xffffffffULL) { | 465 | if (offset & ~0xffffffffULL) { |
633 | NV_ERROR(dev, "obj offset exceeds 32-bits\n"); | 466 | NV_ERROR(dev, "obj offset exceeds 32-bits\n"); |
634 | return -EINVAL; | 467 | return -EINVAL; |
@@ -760,8 +593,11 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | |||
760 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | 593 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); |
761 | if (!gpuobj) | 594 | if (!gpuobj) |
762 | return -ENOMEM; | 595 | return -ENOMEM; |
596 | gpuobj->dev = chan->dev; | ||
763 | gpuobj->engine = NVOBJ_ENGINE_SW; | 597 | gpuobj->engine = NVOBJ_ENGINE_SW; |
764 | gpuobj->class = class; | 598 | gpuobj->class = class; |
599 | gpuobj->refcount = 1; | ||
600 | gpuobj->cinst = 0x40; | ||
765 | 601 | ||
766 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | 602 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); |
767 | *gpuobj_ret = gpuobj; | 603 | *gpuobj_ret = gpuobj; |
@@ -773,7 +609,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | |||
773 | { | 609 | { |
774 | struct drm_device *dev = chan->dev; | 610 | struct drm_device *dev = chan->dev; |
775 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 611 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
776 | struct nouveau_gpuobj *pramin = NULL; | ||
777 | uint32_t size; | 612 | uint32_t size; |
778 | uint32_t base; | 613 | uint32_t base; |
779 | int ret; | 614 | int ret; |
@@ -798,18 +633,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | |||
798 | size += 0x1000; | 633 | size += 0x1000; |
799 | } | 634 | } |
800 | 635 | ||
801 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, | 636 | ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); |
802 | &chan->ramin); | ||
803 | if (ret) { | 637 | if (ret) { |
804 | NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); | 638 | NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); |
805 | return ret; | 639 | return ret; |
806 | } | 640 | } |
807 | pramin = chan->ramin->gpuobj; | ||
808 | 641 | ||
809 | ret = drm_mm_init(&chan->ramin_heap, base, size); | 642 | ret = drm_mm_init(&chan->ramin_heap, base, size); |
810 | if (ret) { | 643 | if (ret) { |
811 | NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); | 644 | NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); |
812 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | 645 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
813 | return ret; | 646 | return ret; |
814 | } | 647 | } |
815 | 648 | ||
@@ -826,8 +659,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
826 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; | 659 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; |
827 | int ret, i; | 660 | int ret, i; |
828 | 661 | ||
829 | INIT_LIST_HEAD(&chan->ramht_refs); | ||
830 | |||
831 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); | 662 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); |
832 | 663 | ||
833 | /* Allocate a chunk of memory for per-channel object storage */ | 664 | /* Allocate a chunk of memory for per-channel object storage */ |
@@ -846,10 +677,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
846 | uint32_t vm_offset, pde; | 677 | uint32_t vm_offset, pde; |
847 | 678 | ||
848 | vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; | 679 | vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; |
849 | vm_offset += chan->ramin->gpuobj->im_pramin->start; | 680 | vm_offset += chan->ramin->im_pramin->start; |
850 | 681 | ||
851 | ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, | 682 | ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, |
852 | 0, &chan->vm_pd, NULL); | 683 | 0, &chan->vm_pd); |
853 | if (ret) | 684 | if (ret) |
854 | return ret; | 685 | return ret; |
855 | for (i = 0; i < 0x4000; i += 8) { | 686 | for (i = 0; i < 0x4000; i += 8) { |
@@ -857,25 +688,19 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
857 | nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); | 688 | nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); |
858 | } | 689 | } |
859 | 690 | ||
691 | nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, | ||
692 | &chan->vm_gart_pt); | ||
860 | pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; | 693 | pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; |
861 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, | 694 | nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3); |
862 | dev_priv->gart_info.sg_ctxdma, | ||
863 | &chan->vm_gart_pt); | ||
864 | if (ret) | ||
865 | return ret; | ||
866 | nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->instance | 3); | ||
867 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); | 695 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); |
868 | 696 | ||
869 | pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; | 697 | pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; |
870 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { | 698 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { |
871 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, | 699 | nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i], |
872 | dev_priv->vm_vram_pt[i], | 700 | &chan->vm_vram_pt[i]); |
873 | &chan->vm_vram_pt[i]); | ||
874 | if (ret) | ||
875 | return ret; | ||
876 | 701 | ||
877 | nv_wo32(chan->vm_pd, pde + 0, | 702 | nv_wo32(chan->vm_pd, pde + 0, |
878 | chan->vm_vram_pt[i]->instance | 0x61); | 703 | chan->vm_vram_pt[i]->vinst | 0x61); |
879 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); | 704 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); |
880 | pde += 8; | 705 | pde += 8; |
881 | } | 706 | } |
@@ -885,15 +710,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
885 | 710 | ||
886 | /* RAMHT */ | 711 | /* RAMHT */ |
887 | if (dev_priv->card_type < NV_50) { | 712 | if (dev_priv->card_type < NV_50) { |
888 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, | 713 | nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL); |
889 | &chan->ramht); | 714 | } else { |
715 | struct nouveau_gpuobj *ramht = NULL; | ||
716 | |||
717 | ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16, | ||
718 | NVOBJ_FLAG_ZERO_ALLOC, &ramht); | ||
890 | if (ret) | 719 | if (ret) |
891 | return ret; | 720 | return ret; |
892 | } else { | 721 | |
893 | ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, | 722 | ret = nouveau_ramht_new(dev, ramht, &chan->ramht); |
894 | 0x8000, 16, | 723 | nouveau_gpuobj_ref(NULL, &ramht); |
895 | NVOBJ_FLAG_ZERO_ALLOC, | ||
896 | &chan->ramht); | ||
897 | if (ret) | 724 | if (ret) |
898 | return ret; | 725 | return ret; |
899 | } | 726 | } |
@@ -910,24 +737,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
910 | } | 737 | } |
911 | } else { | 738 | } else { |
912 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 739 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
913 | 0, dev_priv->fb_available_size, | 740 | 0, dev_priv->fb_available_size, |
914 | NV_DMA_ACCESS_RW, | 741 | NV_DMA_ACCESS_RW, |
915 | NV_DMA_TARGET_VIDMEM, &vram); | 742 | NV_DMA_TARGET_VIDMEM, &vram); |
916 | if (ret) { | 743 | if (ret) { |
917 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | 744 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); |
918 | return ret; | 745 | return ret; |
919 | } | 746 | } |
920 | } | 747 | } |
921 | 748 | ||
922 | ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL); | 749 | ret = nouveau_ramht_insert(chan, vram_h, vram); |
750 | nouveau_gpuobj_ref(NULL, &vram); | ||
923 | if (ret) { | 751 | if (ret) { |
924 | NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret); | 752 | NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret); |
925 | return ret; | 753 | return ret; |
926 | } | 754 | } |
927 | 755 | ||
928 | /* TT memory ctxdma */ | 756 | /* TT memory ctxdma */ |
929 | if (dev_priv->card_type >= NV_50) { | 757 | if (dev_priv->card_type >= NV_50) { |
930 | tt = vram; | 758 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
759 | 0, dev_priv->vm_end, | ||
760 | NV_DMA_ACCESS_RW, | ||
761 | NV_DMA_TARGET_AGP, &tt); | ||
762 | if (ret) { | ||
763 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | ||
764 | return ret; | ||
765 | } | ||
931 | } else | 766 | } else |
932 | if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { | 767 | if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { |
933 | ret = nouveau_gpuobj_gart_dma_new(chan, 0, | 768 | ret = nouveau_gpuobj_gart_dma_new(chan, 0, |
@@ -943,9 +778,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
943 | return ret; | 778 | return ret; |
944 | } | 779 | } |
945 | 780 | ||
946 | ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); | 781 | ret = nouveau_ramht_insert(chan, tt_h, tt); |
782 | nouveau_gpuobj_ref(NULL, &tt); | ||
947 | if (ret) { | 783 | if (ret) { |
948 | NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret); | 784 | NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret); |
949 | return ret; | 785 | return ret; |
950 | } | 786 | } |
951 | 787 | ||
@@ -957,33 +793,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | |||
957 | { | 793 | { |
958 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | 794 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; |
959 | struct drm_device *dev = chan->dev; | 795 | struct drm_device *dev = chan->dev; |
960 | struct list_head *entry, *tmp; | ||
961 | struct nouveau_gpuobj_ref *ref; | ||
962 | int i; | 796 | int i; |
963 | 797 | ||
964 | NV_DEBUG(dev, "ch%d\n", chan->id); | 798 | NV_DEBUG(dev, "ch%d\n", chan->id); |
965 | 799 | ||
966 | if (!chan->ramht_refs.next) | 800 | if (!chan->ramht) |
967 | return; | 801 | return; |
968 | 802 | ||
969 | list_for_each_safe(entry, tmp, &chan->ramht_refs) { | 803 | nouveau_ramht_ref(NULL, &chan->ramht, chan); |
970 | ref = list_entry(entry, struct nouveau_gpuobj_ref, list); | ||
971 | |||
972 | nouveau_gpuobj_ref_del(dev, &ref); | ||
973 | } | ||
974 | |||
975 | nouveau_gpuobj_ref_del(dev, &chan->ramht); | ||
976 | 804 | ||
977 | nouveau_gpuobj_del(dev, &chan->vm_pd); | 805 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); |
978 | nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); | 806 | nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); |
979 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) | 807 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) |
980 | nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); | 808 | nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); |
981 | 809 | ||
982 | if (chan->ramin_heap.free_stack.next) | 810 | if (chan->ramin_heap.free_stack.next) |
983 | drm_mm_takedown(&chan->ramin_heap); | 811 | drm_mm_takedown(&chan->ramin_heap); |
984 | if (chan->ramin) | 812 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
985 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | ||
986 | |||
987 | } | 813 | } |
988 | 814 | ||
989 | int | 815 | int |
@@ -1095,25 +921,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, | |||
1095 | return -EPERM; | 921 | return -EPERM; |
1096 | } | 922 | } |
1097 | 923 | ||
1098 | if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) | 924 | if (nouveau_ramht_find(chan, init->handle)) |
1099 | return -EEXIST; | 925 | return -EEXIST; |
1100 | 926 | ||
1101 | if (!grc->software) | 927 | if (!grc->software) |
1102 | ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); | 928 | ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); |
1103 | else | 929 | else |
1104 | ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); | 930 | ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); |
1105 | |||
1106 | if (ret) { | 931 | if (ret) { |
1107 | NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", | 932 | NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", |
1108 | ret, init->channel, init->handle); | 933 | ret, init->channel, init->handle); |
1109 | return ret; | 934 | return ret; |
1110 | } | 935 | } |
1111 | 936 | ||
1112 | ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL); | 937 | ret = nouveau_ramht_insert(chan, init->handle, gr); |
938 | nouveau_gpuobj_ref(NULL, &gr); | ||
1113 | if (ret) { | 939 | if (ret) { |
1114 | NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", | 940 | NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", |
1115 | ret, init->channel, init->handle); | 941 | ret, init->channel, init->handle); |
1116 | nouveau_gpuobj_del(dev, &gr); | ||
1117 | return ret; | 942 | return ret; |
1118 | } | 943 | } |
1119 | 944 | ||
@@ -1124,17 +949,16 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, | |||
1124 | struct drm_file *file_priv) | 949 | struct drm_file *file_priv) |
1125 | { | 950 | { |
1126 | struct drm_nouveau_gpuobj_free *objfree = data; | 951 | struct drm_nouveau_gpuobj_free *objfree = data; |
1127 | struct nouveau_gpuobj_ref *ref; | 952 | struct nouveau_gpuobj *gpuobj; |
1128 | struct nouveau_channel *chan; | 953 | struct nouveau_channel *chan; |
1129 | int ret; | ||
1130 | 954 | ||
1131 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); | 955 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); |
1132 | 956 | ||
1133 | ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); | 957 | gpuobj = nouveau_ramht_find(chan, objfree->handle); |
1134 | if (ret) | 958 | if (!gpuobj) |
1135 | return ret; | 959 | return -ENOENT; |
1136 | nouveau_gpuobj_ref_del(dev, &ref); | ||
1137 | 960 | ||
961 | nouveau_ramht_remove(chan, objfree->handle); | ||
1138 | return 0; | 962 | return 0; |
1139 | } | 963 | } |
1140 | 964 | ||
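The replacement helper introduced above is a set-with-reference primitive: it takes a reference on the new object (if any), drops the reference held through the destination pointer (freeing the object once the count reaches zero), and stores the new pointer. A minimal standalone sketch of the same pattern, using hypothetical obj/obj_ref names rather than the driver's types:

```c
#include <stdlib.h>

struct obj {
	int refcount;
};

static void obj_del(struct obj *o)
{
	free(o);
}

/* Same shape as the new nouveau_gpuobj_ref(ref, ptr). */
static void obj_ref(struct obj *ref, struct obj **ptr)
{
	if (ref)
		ref->refcount++;                  /* take a reference on the incoming object */

	if (*ptr && --(*ptr)->refcount == 0)
		obj_del(*ptr);                    /* last reference to the old object dropped */

	*ptr = ref;                               /* publish the new pointer */
}

int main(void)
{
	struct obj *a = calloc(1, sizeof(*a));
	struct obj *slot = NULL;

	if (!a)
		return 1;
	a->refcount = 1;          /* allocation reference, as set in nouveau_gpuobj_new() */
	obj_ref(a, &slot);        /* slot holds a second reference */
	obj_ref(NULL, &a);        /* drop the allocation reference */
	obj_ref(NULL, &slot);     /* count hits zero here, object is freed */
	return 0;
}
```

So nouveau_gpuobj_ref(NULL, &chan->pushbuf) in the hunks above is a pure unreference, and nouveau_gpuobj_ref(pushbuf, &chan->pushbuf) both references and assigns.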
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index e5cc93c55d80..5f9d52f06305 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c | |||
@@ -62,48 +62,56 @@ nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, | |||
62 | } | 62 | } |
63 | 63 | ||
64 | int | 64 | int |
65 | nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) | 65 | nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, |
66 | struct nouveau_gpuobj *gpuobj) | ||
66 | { | 67 | { |
68 | struct drm_device *dev = chan->dev; | ||
67 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 69 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
68 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | 70 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; |
69 | struct nouveau_channel *chan = ref->channel; | 71 | struct nouveau_ramht_entry *entry; |
70 | struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; | 72 | struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; |
71 | uint32_t ctx, co, ho; | 73 | uint32_t ctx, co, ho; |
72 | 74 | ||
73 | if (!ramht) { | 75 | if (nouveau_ramht_find(chan, handle)) |
74 | NV_ERROR(dev, "No hash table!\n"); | 76 | return -EEXIST; |
75 | return -EINVAL; | 77 | |
76 | } | 78 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
79 | if (!entry) | ||
80 | return -ENOMEM; | ||
81 | entry->channel = chan; | ||
82 | entry->gpuobj = NULL; | ||
83 | entry->handle = handle; | ||
84 | list_add(&entry->head, &chan->ramht->entries); | ||
85 | nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); | ||
77 | 86 | ||
78 | if (dev_priv->card_type < NV_40) { | 87 | if (dev_priv->card_type < NV_40) { |
79 | ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | | 88 | ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) | |
80 | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | | 89 | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | |
81 | (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); | 90 | (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); |
82 | } else | 91 | } else |
83 | if (dev_priv->card_type < NV_50) { | 92 | if (dev_priv->card_type < NV_50) { |
84 | ctx = (ref->instance >> 4) | | 93 | ctx = (gpuobj->cinst >> 4) | |
85 | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | | 94 | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | |
86 | (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); | 95 | (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); |
87 | } else { | 96 | } else { |
88 | if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { | 97 | if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { |
89 | ctx = (ref->instance << 10) | 2; | 98 | ctx = (gpuobj->cinst << 10) | 2; |
90 | } else { | 99 | } else { |
91 | ctx = (ref->instance >> 4) | | 100 | ctx = (gpuobj->cinst >> 4) | |
92 | ((ref->gpuobj->engine << | 101 | ((gpuobj->engine << |
93 | NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); | 102 | NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); |
94 | } | 103 | } |
95 | } | 104 | } |
96 | 105 | ||
97 | co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); | 106 | co = ho = nouveau_ramht_hash_handle(dev, chan->id, handle); |
98 | do { | 107 | do { |
99 | if (!nouveau_ramht_entry_valid(dev, ramht, co)) { | 108 | if (!nouveau_ramht_entry_valid(dev, ramht, co)) { |
100 | NV_DEBUG(dev, | 109 | NV_DEBUG(dev, |
101 | "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", | 110 | "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", |
102 | chan->id, co, ref->handle, ctx); | 111 | chan->id, co, handle, ctx); |
103 | nv_wo32(ramht, co + 0, ref->handle); | 112 | nv_wo32(ramht, co + 0, handle); |
104 | nv_wo32(ramht, co + 4, ctx); | 113 | nv_wo32(ramht, co + 4, ctx); |
105 | 114 | ||
106 | list_add_tail(&ref->list, &chan->ramht_refs); | ||
107 | instmem->flush(dev); | 115 | instmem->flush(dev); |
108 | return 0; | 116 | return 0; |
109 | } | 117 | } |
@@ -116,35 +124,40 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) | |||
116 | } while (co != ho); | 124 | } while (co != ho); |
117 | 125 | ||
118 | NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); | 126 | NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); |
127 | list_del(&entry->head); | ||
128 | kfree(entry); | ||
119 | return -ENOMEM; | 129 | return -ENOMEM; |
120 | } | 130 | } |
121 | 131 | ||
122 | void | 132 | void |
123 | nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) | 133 | nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) |
124 | { | 134 | { |
135 | struct drm_device *dev = chan->dev; | ||
125 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 136 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
126 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | 137 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; |
127 | struct nouveau_channel *chan = ref->channel; | 138 | struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; |
128 | struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; | 139 | struct nouveau_ramht_entry *entry, *tmp; |
129 | uint32_t co, ho; | 140 | u32 co, ho; |
130 | 141 | ||
131 | if (!ramht) { | 142 | list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) { |
132 | NV_ERROR(dev, "No hash table!\n"); | 143 | if (entry->channel != chan || entry->handle != handle) |
133 | return; | 144 | continue; |
145 | |||
146 | nouveau_gpuobj_ref(NULL, &entry->gpuobj); | ||
147 | list_del(&entry->head); | ||
148 | kfree(entry); | ||
149 | break; | ||
134 | } | 150 | } |
135 | 151 | ||
136 | co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); | 152 | co = ho = nouveau_ramht_hash_handle(dev, chan->id, handle); |
137 | do { | 153 | do { |
138 | if (nouveau_ramht_entry_valid(dev, ramht, co) && | 154 | if (nouveau_ramht_entry_valid(dev, ramht, co) && |
139 | (ref->handle == nv_ro32(ramht, co))) { | 155 | (handle == nv_ro32(ramht, co))) { |
140 | NV_DEBUG(dev, | 156 | NV_DEBUG(dev, |
141 | "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", | 157 | "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", |
142 | chan->id, co, ref->handle, | 158 | chan->id, co, handle, nv_ro32(ramht, co + 4)); |
143 | nv_ro32(ramht, co + 4)); | ||
144 | nv_wo32(ramht, co + 0, 0x00000000); | 159 | nv_wo32(ramht, co + 0, 0x00000000); |
145 | nv_wo32(ramht, co + 4, 0x00000000); | 160 | nv_wo32(ramht, co + 4, 0x00000000); |
146 | |||
147 | list_del(&ref->list); | ||
148 | instmem->flush(dev); | 161 | instmem->flush(dev); |
149 | return; | 162 | return; |
150 | } | 163 | } |
@@ -153,8 +166,64 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) | |||
153 | if (co >= dev_priv->ramht_size) | 166 | if (co >= dev_priv->ramht_size) |
154 | co = 0; | 167 | co = 0; |
155 | } while (co != ho); | 168 | } while (co != ho); |
156 | list_del(&ref->list); | ||
157 | 169 | ||
158 | NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", | 170 | NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", |
159 | chan->id, ref->handle); | 171 | chan->id, handle); |
172 | } | ||
173 | |||
174 | struct nouveau_gpuobj * | ||
175 | nouveau_ramht_find(struct nouveau_channel *chan, u32 handle) | ||
176 | { | ||
177 | struct nouveau_ramht_entry *entry; | ||
178 | |||
179 | list_for_each_entry(entry, &chan->ramht->entries, head) { | ||
180 | if (entry->channel == chan && entry->handle == handle) | ||
181 | return entry->gpuobj; | ||
182 | } | ||
183 | |||
184 | return NULL; | ||
185 | } | ||
186 | |||
187 | int | ||
188 | nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, | ||
189 | struct nouveau_ramht **pramht) | ||
190 | { | ||
191 | struct nouveau_ramht *ramht; | ||
192 | |||
193 | ramht = kzalloc(sizeof(*ramht), GFP_KERNEL); | ||
194 | if (!ramht) | ||
195 | return -ENOMEM; | ||
196 | |||
197 | ramht->dev = dev; | ||
198 | ramht->refcount = 1; | ||
199 | INIT_LIST_HEAD(&ramht->entries); | ||
200 | nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj); | ||
201 | |||
202 | *pramht = ramht; | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | void | ||
207 | nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, | ||
208 | struct nouveau_channel *chan) | ||
209 | { | ||
210 | struct nouveau_ramht_entry *entry, *tmp; | ||
211 | struct nouveau_ramht *ramht; | ||
212 | |||
213 | if (ref) | ||
214 | ref->refcount++; | ||
215 | |||
216 | ramht = *ptr; | ||
217 | if (ramht) { | ||
218 | list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { | ||
219 | if (entry->channel == chan) | ||
220 | nouveau_ramht_remove(chan, entry->handle); | ||
221 | } | ||
222 | |||
223 | if (--ramht->refcount == 0) { | ||
224 | nouveau_gpuobj_ref(NULL, &ramht->gpuobj); | ||
225 | kfree(ramht); | ||
226 | } | ||
227 | } | ||
228 | *ptr = ref; | ||
160 | } | 229 | } |
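nouveau_ramht_ref() above follows the same set-with-reference shape for the hash table itself, with one twist: the third argument names a channel whose entries are evicted before the table reference is dropped. A sketch of the two call forms this patch uses (fragment only, not compilable outside the driver):

```c
/* From nouveau_gpuobj_channel_init(): share the global <NV50 hash table. */
nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);

/* From nouveau_gpuobj_channel_takedown(): drop it, removing this
 * channel's entries (and their object references) first. */
nouveau_ramht_ref(NULL, &chan->ramht, chan);
```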
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h index e10455c6e7ff..7076ae4c07a5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.h +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h | |||
@@ -25,7 +25,29 @@ | |||
25 | #ifndef __NOUVEAU_RAMHT_H__ | 25 | #ifndef __NOUVEAU_RAMHT_H__ |
26 | #define __NOUVEAU_RAMHT_H__ | 26 | #define __NOUVEAU_RAMHT_H__ |
27 | 27 | ||
28 | extern int nouveau_ramht_insert(struct drm_device *, struct nouveau_gpuobj_ref *); | 28 | struct nouveau_ramht_entry { |
29 | extern void nouveau_ramht_remove(struct drm_device *, struct nouveau_gpuobj_ref *); | 29 | struct list_head head; |
30 | struct nouveau_channel *channel; | ||
31 | struct nouveau_gpuobj *gpuobj; | ||
32 | u32 handle; | ||
33 | }; | ||
34 | |||
35 | struct nouveau_ramht { | ||
36 | struct drm_device *dev; | ||
37 | int refcount; | ||
38 | struct nouveau_gpuobj *gpuobj; | ||
39 | struct list_head entries; | ||
40 | }; | ||
41 | |||
42 | extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *, | ||
43 | struct nouveau_ramht **); | ||
44 | extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **, | ||
45 | struct nouveau_channel *unref_channel); | ||
46 | |||
47 | extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, | ||
48 | struct nouveau_gpuobj *); | ||
49 | extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle); | ||
50 | extern struct nouveau_gpuobj * | ||
51 | nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); | ||
30 | 52 | ||
31 | #endif | 53 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 630988af801c..5a66a7ae6e29 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -234,7 +234,6 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
234 | } | 234 | } |
235 | 235 | ||
236 | ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, | 236 | ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, |
237 | NVOBJ_FLAG_ALLOW_NO_REFS | | ||
238 | NVOBJ_FLAG_ZERO_ALLOC | | 237 | NVOBJ_FLAG_ZERO_ALLOC | |
239 | NVOBJ_FLAG_ZERO_FREE, &gpuobj); | 238 | NVOBJ_FLAG_ZERO_FREE, &gpuobj); |
240 | if (ret) { | 239 | if (ret) { |
@@ -245,7 +244,7 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
245 | dev_priv->gart_info.sg_dummy_page = | 244 | dev_priv->gart_info.sg_dummy_page = |
246 | alloc_page(GFP_KERNEL|__GFP_DMA32); | 245 | alloc_page(GFP_KERNEL|__GFP_DMA32); |
247 | if (!dev_priv->gart_info.sg_dummy_page) { | 246 | if (!dev_priv->gart_info.sg_dummy_page) { |
248 | nouveau_gpuobj_del(dev, &gpuobj); | 247 | nouveau_gpuobj_ref(NULL, &gpuobj); |
249 | return -ENOMEM; | 248 | return -ENOMEM; |
250 | } | 249 | } |
251 | 250 | ||
@@ -254,11 +253,17 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
254 | pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, | 253 | pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, |
255 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 254 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
256 | if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { | 255 | if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { |
257 | nouveau_gpuobj_del(dev, &gpuobj); | 256 | nouveau_gpuobj_ref(NULL, &gpuobj); |
258 | return -EFAULT; | 257 | return -EFAULT; |
259 | } | 258 | } |
260 | 259 | ||
261 | if (dev_priv->card_type < NV_50) { | 260 | if (dev_priv->card_type < NV_50) { |
261 | /* Special case: this object is allocated from the global instmem | ||
262 | * heap, so cinst would normally be invalid. It is used on all | ||
263 | * channels though, so cinst must be valid; set it to pinst. | ||
264 | */ | ||
265 | gpuobj->cinst = gpuobj->pinst; | ||
266 | |||
262 | /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and | 267 | /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and |
263 | * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE | 268 | * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE |
264 | * on those cards? */ | 269 | * on those cards? */ |
@@ -302,7 +307,7 @@ nouveau_sgdma_takedown(struct drm_device *dev) | |||
302 | dev_priv->gart_info.sg_dummy_bus = 0; | 307 | dev_priv->gart_info.sg_dummy_bus = 0; |
303 | } | 308 | } |
304 | 309 | ||
305 | nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); | 310 | nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); |
306 | } | 311 | } |
307 | 312 | ||
308 | int | 313 | int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 989322be3728..fec29522298d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include "nouveau_drv.h" | 35 | #include "nouveau_drv.h" |
36 | #include "nouveau_drm.h" | 36 | #include "nouveau_drm.h" |
37 | #include "nouveau_fbcon.h" | 37 | #include "nouveau_fbcon.h" |
38 | #include "nouveau_ramht.h" | ||
38 | #include "nv50_display.h" | 39 | #include "nv50_display.h" |
39 | 40 | ||
40 | static void nouveau_stub_takedown(struct drm_device *dev) {} | 41 | static void nouveau_stub_takedown(struct drm_device *dev) {} |
@@ -437,16 +438,14 @@ static int | |||
437 | nouveau_card_init_channel(struct drm_device *dev) | 438 | nouveau_card_init_channel(struct drm_device *dev) |
438 | { | 439 | { |
439 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 440 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
440 | struct nouveau_gpuobj *gpuobj; | 441 | struct nouveau_gpuobj *gpuobj = NULL; |
441 | int ret; | 442 | int ret; |
442 | 443 | ||
443 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, | 444 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, |
444 | (struct drm_file *)-2, | 445 | (struct drm_file *)-2, NvDmaFB, NvDmaTT); |
445 | NvDmaFB, NvDmaTT); | ||
446 | if (ret) | 446 | if (ret) |
447 | return ret; | 447 | return ret; |
448 | 448 | ||
449 | gpuobj = NULL; | ||
450 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, | 449 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, |
451 | 0, dev_priv->vram_size, | 450 | 0, dev_priv->vram_size, |
452 | NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, | 451 | NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, |
@@ -454,26 +453,25 @@ nouveau_card_init_channel(struct drm_device *dev) | |||
454 | if (ret) | 453 | if (ret) |
455 | goto out_err; | 454 | goto out_err; |
456 | 455 | ||
457 | ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM, | 456 | ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj); |
458 | gpuobj, NULL); | 457 | nouveau_gpuobj_ref(NULL, &gpuobj); |
459 | if (ret) | 458 | if (ret) |
460 | goto out_err; | 459 | goto out_err; |
461 | 460 | ||
462 | gpuobj = NULL; | ||
463 | ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, | 461 | ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, |
464 | dev_priv->gart_info.aper_size, | 462 | dev_priv->gart_info.aper_size, |
465 | NV_DMA_ACCESS_RW, &gpuobj, NULL); | 463 | NV_DMA_ACCESS_RW, &gpuobj, NULL); |
466 | if (ret) | 464 | if (ret) |
467 | goto out_err; | 465 | goto out_err; |
468 | 466 | ||
469 | ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART, | 467 | ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj); |
470 | gpuobj, NULL); | 468 | nouveau_gpuobj_ref(NULL, &gpuobj); |
471 | if (ret) | 469 | if (ret) |
472 | goto out_err; | 470 | goto out_err; |
473 | 471 | ||
474 | return 0; | 472 | return 0; |
473 | |||
475 | out_err: | 474 | out_err: |
476 | nouveau_gpuobj_del(dev, &gpuobj); | ||
477 | nouveau_channel_free(dev_priv->channel); | 475 | nouveau_channel_free(dev_priv->channel); |
478 | dev_priv->channel = NULL; | 476 | dev_priv->channel = NULL; |
479 | return ret; | 477 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 1eeac4fae73d..33e4c9388bc1 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "drmP.h" | 25 | #include "drmP.h" |
26 | #include "nouveau_drv.h" | 26 | #include "nouveau_drv.h" |
27 | #include "nouveau_dma.h" | 27 | #include "nouveau_dma.h" |
28 | #include "nouveau_ramht.h" | ||
28 | #include "nouveau_fbcon.h" | 29 | #include "nouveau_fbcon.h" |
29 | 30 | ||
30 | void | 31 | void |
@@ -169,11 +170,9 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) | |||
169 | if (ret) | 170 | if (ret) |
170 | return ret; | 171 | return ret; |
171 | 172 | ||
172 | ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL); | 173 | ret = nouveau_ramht_insert(dev_priv->channel, handle, obj); |
173 | if (ret) | 174 | nouveau_gpuobj_ref(NULL, &obj); |
174 | return ret; | 175 | return ret; |
175 | |||
176 | return 0; | ||
177 | } | 176 | } |
178 | 177 | ||
179 | int | 178 | int |
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index bbb87ef262c0..b7ecafb78d77 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c | |||
@@ -38,10 +38,8 @@ | |||
38 | #define NV04_RAMFC_ENGINE 0x14 | 38 | #define NV04_RAMFC_ENGINE 0x14 |
39 | #define NV04_RAMFC_PULL1_ENGINE 0x18 | 39 | #define NV04_RAMFC_PULL1_ENGINE 0x18 |
40 | 40 | ||
41 | #define RAMFC_WR(offset, val) nv_wo32(chan->ramfc->gpuobj, \ | 41 | #define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val)) |
42 | NV04_RAMFC_##offset, (val)) | 42 | #define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset) |
43 | #define RAMFC_RD(offset) nv_ro32(chan->ramfc->gpuobj, \ | ||
44 | NV04_RAMFC_##offset) | ||
45 | 43 | ||
46 | void | 44 | void |
47 | nv04_fifo_disable(struct drm_device *dev) | 45 | nv04_fifo_disable(struct drm_device *dev) |
@@ -130,7 +128,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) | |||
130 | NV04_RAMFC__SIZE, | 128 | NV04_RAMFC__SIZE, |
131 | NVOBJ_FLAG_ZERO_ALLOC | | 129 | NVOBJ_FLAG_ZERO_ALLOC | |
132 | NVOBJ_FLAG_ZERO_FREE, | 130 | NVOBJ_FLAG_ZERO_FREE, |
133 | NULL, &chan->ramfc); | 131 | &chan->ramfc); |
134 | if (ret) | 132 | if (ret) |
135 | return ret; | 133 | return ret; |
136 | 134 | ||
@@ -139,7 +137,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) | |||
139 | /* Setup initial state */ | 137 | /* Setup initial state */ |
140 | RAMFC_WR(DMA_PUT, chan->pushbuf_base); | 138 | RAMFC_WR(DMA_PUT, chan->pushbuf_base); |
141 | RAMFC_WR(DMA_GET, chan->pushbuf_base); | 139 | RAMFC_WR(DMA_GET, chan->pushbuf_base); |
142 | RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); | 140 | RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4); |
143 | RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 141 | RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
144 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 142 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | |
145 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | | 143 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | |
@@ -161,7 +159,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan) | |||
161 | nv_wr32(dev, NV04_PFIFO_MODE, | 159 | nv_wr32(dev, NV04_PFIFO_MODE, |
162 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); | 160 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); |
163 | 161 | ||
164 | nouveau_gpuobj_ref_del(dev, &chan->ramfc); | 162 | nouveau_gpuobj_ref(NULL, &chan->ramfc); |
165 | } | 163 | } |
166 | 164 | ||
167 | static void | 165 | static void |
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index 7a4069cf5d0b..ccb07fb701ca 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c | |||
@@ -48,7 +48,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan) | |||
48 | 48 | ||
49 | ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, | 49 | ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, |
50 | NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | | 50 | NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | |
51 | NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); | 51 | NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); |
52 | if (ret) | 52 | if (ret) |
53 | return ret; | 53 | return ret; |
54 | 54 | ||
@@ -57,7 +57,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan) | |||
57 | */ | 57 | */ |
58 | nv_wi32(dev, fc + 0, chan->pushbuf_base); | 58 | nv_wi32(dev, fc + 0, chan->pushbuf_base); |
59 | nv_wi32(dev, fc + 4, chan->pushbuf_base); | 59 | nv_wi32(dev, fc + 4, chan->pushbuf_base); |
60 | nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); | 60 | nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); |
61 | nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 61 | nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
62 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 62 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | |
63 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | | 63 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | |
@@ -80,7 +80,7 @@ nv10_fifo_destroy_context(struct nouveau_channel *chan) | |||
80 | nv_wr32(dev, NV04_PFIFO_MODE, | 80 | nv_wr32(dev, NV04_PFIFO_MODE, |
81 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); | 81 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); |
82 | 82 | ||
83 | nouveau_gpuobj_ref_del(dev, &chan->ramfc); | 83 | nouveau_gpuobj_ref(NULL, &chan->ramfc); |
84 | } | 84 | } |
85 | 85 | ||
86 | static void | 86 | static void |
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index d8693d32bd0e..12ab9cd56eca 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c | |||
@@ -403,21 +403,19 @@ nv20_graph_create_context(struct nouveau_channel *chan) | |||
403 | BUG_ON(1); | 403 | BUG_ON(1); |
404 | } | 404 | } |
405 | 405 | ||
406 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, | 406 | ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, |
407 | 16, NVOBJ_FLAG_ZERO_ALLOC, | 407 | NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx); |
408 | &chan->ramin_grctx); | ||
409 | if (ret) | 408 | if (ret) |
410 | return ret; | 409 | return ret; |
411 | 410 | ||
412 | /* Initialise default context values */ | 411 | /* Initialise default context values */ |
413 | ctx_init(dev, chan->ramin_grctx->gpuobj); | 412 | ctx_init(dev, chan->ramin_grctx); |
414 | 413 | ||
415 | /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ | 414 | /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ |
416 | nv_wo32(chan->ramin_grctx->gpuobj, idoffs, | 415 | nv_wo32(chan->ramin_grctx, idoffs, |
417 | (chan->id << 24) | 0x1); /* CTX_USER */ | 416 | (chan->id << 24) | 0x1); /* CTX_USER */ |
418 | 417 | ||
419 | nv_wo32(pgraph->ctx_table->gpuobj, chan->id * 4, | 418 | nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4); |
420 | chan->ramin_grctx->instance >> 4); | ||
421 | return 0; | 419 | return 0; |
422 | } | 420 | } |
423 | 421 | ||
@@ -428,10 +426,8 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) | |||
428 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 426 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
429 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | 427 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
430 | 428 | ||
431 | if (chan->ramin_grctx) | 429 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); |
432 | nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); | 430 | nv_wo32(pgraph->ctx_table, chan->id * 4, 0); |
433 | |||
434 | nv_wo32(pgraph->ctx_table->gpuobj, chan->id * 4, 0); | ||
435 | } | 431 | } |
436 | 432 | ||
437 | int | 433 | int |
@@ -442,7 +438,7 @@ nv20_graph_load_context(struct nouveau_channel *chan) | |||
442 | 438 | ||
443 | if (!chan->ramin_grctx) | 439 | if (!chan->ramin_grctx) |
444 | return -EINVAL; | 440 | return -EINVAL; |
445 | inst = chan->ramin_grctx->instance >> 4; | 441 | inst = chan->ramin_grctx->pinst >> 4; |
446 | 442 | ||
447 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); | 443 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); |
448 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, | 444 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, |
@@ -465,7 +461,7 @@ nv20_graph_unload_context(struct drm_device *dev) | |||
465 | chan = pgraph->channel(dev); | 461 | chan = pgraph->channel(dev); |
466 | if (!chan) | 462 | if (!chan) |
467 | return 0; | 463 | return 0; |
468 | inst = chan->ramin_grctx->instance >> 4; | 464 | inst = chan->ramin_grctx->pinst >> 4; |
469 | 465 | ||
470 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); | 466 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); |
471 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, | 467 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, |
@@ -552,15 +548,15 @@ nv20_graph_init(struct drm_device *dev) | |||
552 | 548 | ||
553 | if (!pgraph->ctx_table) { | 549 | if (!pgraph->ctx_table) { |
554 | /* Create Context Pointer Table */ | 550 | /* Create Context Pointer Table */ |
555 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, | 551 | ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, |
556 | NVOBJ_FLAG_ZERO_ALLOC, | 552 | NVOBJ_FLAG_ZERO_ALLOC, |
557 | &pgraph->ctx_table); | 553 | &pgraph->ctx_table); |
558 | if (ret) | 554 | if (ret) |
559 | return ret; | 555 | return ret; |
560 | } | 556 | } |
561 | 557 | ||
562 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, | 558 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, |
563 | pgraph->ctx_table->instance >> 4); | 559 | pgraph->ctx_table->pinst >> 4); |
564 | 560 | ||
565 | nv20_graph_rdi(dev); | 561 | nv20_graph_rdi(dev); |
566 | 562 | ||
@@ -646,7 +642,7 @@ nv20_graph_takedown(struct drm_device *dev) | |||
646 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 642 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
647 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | 643 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
648 | 644 | ||
649 | nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table); | 645 | nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); |
650 | } | 646 | } |
651 | 647 | ||
652 | int | 648 | int |
@@ -681,15 +677,15 @@ nv30_graph_init(struct drm_device *dev) | |||
681 | 677 | ||
682 | if (!pgraph->ctx_table) { | 678 | if (!pgraph->ctx_table) { |
683 | /* Create Context Pointer Table */ | 679 | /* Create Context Pointer Table */ |
684 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, | 680 | ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, |
685 | NVOBJ_FLAG_ZERO_ALLOC, | 681 | NVOBJ_FLAG_ZERO_ALLOC, |
686 | &pgraph->ctx_table); | 682 | &pgraph->ctx_table); |
687 | if (ret) | 683 | if (ret) |
688 | return ret; | 684 | return ret; |
689 | } | 685 | } |
690 | 686 | ||
691 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, | 687 | nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, |
692 | pgraph->ctx_table->instance >> 4); | 688 | pgraph->ctx_table->pinst >> 4); |
693 | 689 | ||
694 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); | 690 | nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); |
695 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); | 691 | nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); |
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 2b67f1835c39..03f4dc13725b 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c | |||
@@ -42,7 +42,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) | |||
42 | 42 | ||
43 | ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, | 43 | ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, |
44 | NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | | 44 | NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | |
45 | NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); | 45 | NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); |
46 | if (ret) | 46 | if (ret) |
47 | return ret; | 47 | return ret; |
48 | 48 | ||
@@ -50,7 +50,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) | |||
50 | 50 | ||
51 | nv_wi32(dev, fc + 0, chan->pushbuf_base); | 51 | nv_wi32(dev, fc + 0, chan->pushbuf_base); |
52 | nv_wi32(dev, fc + 4, chan->pushbuf_base); | 52 | nv_wi32(dev, fc + 4, chan->pushbuf_base); |
53 | nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); | 53 | nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); |
54 | nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 54 | nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
55 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 55 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | |
56 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | | 56 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | |
@@ -58,7 +58,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) | |||
58 | NV_PFIFO_CACHE1_BIG_ENDIAN | | 58 | NV_PFIFO_CACHE1_BIG_ENDIAN | |
59 | #endif | 59 | #endif |
60 | 0x30000000 /* no idea.. */); | 60 | 0x30000000 /* no idea.. */); |
61 | nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4); | 61 | nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4); |
62 | nv_wi32(dev, fc + 60, 0x0001FFFF); | 62 | nv_wi32(dev, fc + 60, 0x0001FFFF); |
63 | 63 | ||
64 | /* enable the fifo dma operation */ | 64 | /* enable the fifo dma operation */ |
@@ -77,8 +77,7 @@ nv40_fifo_destroy_context(struct nouveau_channel *chan) | |||
77 | nv_wr32(dev, NV04_PFIFO_MODE, | 77 | nv_wr32(dev, NV04_PFIFO_MODE, |
78 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); | 78 | nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); |
79 | 79 | ||
80 | if (chan->ramfc) | 80 | nouveau_gpuobj_ref(NULL, &chan->ramfc); |
81 | nouveau_gpuobj_ref_del(dev, &chan->ramfc); | ||
82 | } | 81 | } |
83 | 82 | ||
84 | static void | 83 | static void |
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 6215dfcf1ea9..912940e2457d 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
@@ -45,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev) | |||
45 | struct nouveau_channel *chan = dev_priv->fifos[i]; | 45 | struct nouveau_channel *chan = dev_priv->fifos[i]; |
46 | 46 | ||
47 | if (chan && chan->ramin_grctx && | 47 | if (chan && chan->ramin_grctx && |
48 | chan->ramin_grctx->instance == inst) | 48 | chan->ramin_grctx->pinst == inst) |
49 | return chan; | 49 | return chan; |
50 | } | 50 | } |
51 | 51 | ||
@@ -61,27 +61,25 @@ nv40_graph_create_context(struct nouveau_channel *chan) | |||
61 | struct nouveau_grctx ctx = {}; | 61 | struct nouveau_grctx ctx = {}; |
62 | int ret; | 62 | int ret; |
63 | 63 | ||
64 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, | 64 | ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, |
65 | 16, NVOBJ_FLAG_ZERO_ALLOC, | 65 | NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx); |
66 | &chan->ramin_grctx); | ||
67 | if (ret) | 66 | if (ret) |
68 | return ret; | 67 | return ret; |
69 | 68 | ||
70 | /* Initialise default context values */ | 69 | /* Initialise default context values */ |
71 | ctx.dev = chan->dev; | 70 | ctx.dev = chan->dev; |
72 | ctx.mode = NOUVEAU_GRCTX_VALS; | 71 | ctx.mode = NOUVEAU_GRCTX_VALS; |
73 | ctx.data = chan->ramin_grctx->gpuobj; | 72 | ctx.data = chan->ramin_grctx; |
74 | nv40_grctx_init(&ctx); | 73 | nv40_grctx_init(&ctx); |
75 | 74 | ||
76 | nv_wo32(chan->ramin_grctx->gpuobj, 0, | 75 | nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->im_pramin->start); |
77 | chan->ramin_grctx->gpuobj->im_pramin->start); | ||
78 | return 0; | 76 | return 0; |
79 | } | 77 | } |
80 | 78 | ||
81 | void | 79 | void |
82 | nv40_graph_destroy_context(struct nouveau_channel *chan) | 80 | nv40_graph_destroy_context(struct nouveau_channel *chan) |
83 | { | 81 | { |
84 | nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx); | 82 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); |
85 | } | 83 | } |
86 | 84 | ||
87 | static int | 85 | static int |
@@ -135,7 +133,7 @@ nv40_graph_load_context(struct nouveau_channel *chan) | |||
135 | 133 | ||
136 | if (!chan->ramin_grctx) | 134 | if (!chan->ramin_grctx) |
137 | return -EINVAL; | 135 | return -EINVAL; |
138 | inst = chan->ramin_grctx->instance >> 4; | 136 | inst = chan->ramin_grctx->pinst >> 4; |
139 | 137 | ||
140 | ret = nv40_graph_transfer_context(dev, inst, 0); | 138 | ret = nv40_graph_transfer_context(dev, inst, 0); |
141 | if (ret) | 139 | if (ret) |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 6f89674ebb96..b65d2ddd415d 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "nouveau_connector.h" | 30 | #include "nouveau_connector.h" |
31 | #include "nouveau_fb.h" | 31 | #include "nouveau_fb.h" |
32 | #include "nouveau_fbcon.h" | 32 | #include "nouveau_fbcon.h" |
33 | #include "nouveau_ramht.h" | ||
33 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
34 | 35 | ||
35 | static void | 36 | static void |
@@ -66,12 +67,6 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, | |||
66 | return ret; | 67 | return ret; |
67 | obj->engine = NVOBJ_ENGINE_DISPLAY; | 68 | obj->engine = NVOBJ_ENGINE_DISPLAY; |
68 | 69 | ||
69 | ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL); | ||
70 | if (ret) { | ||
71 | nouveau_gpuobj_del(dev, &obj); | ||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); | 70 | nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); |
76 | nv_wo32(obj, 4, limit); | 71 | nv_wo32(obj, 4, limit); |
77 | nv_wo32(obj, 8, offset); | 72 | nv_wo32(obj, 8, offset); |
@@ -83,6 +78,12 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, | |||
83 | nv_wo32(obj, 20, 0x00020000); | 78 | nv_wo32(obj, 20, 0x00020000); |
84 | dev_priv->engine.instmem.flush(dev); | 79 | dev_priv->engine.instmem.flush(dev); |
85 | 80 | ||
81 | ret = nouveau_ramht_insert(evo, name, obj); | ||
82 | nouveau_gpuobj_ref(NULL, &obj); | ||
83 | if (ret) { | ||
84 | return ret; | ||
85 | } | ||
86 | |||
86 | return 0; | 87 | return 0; |
87 | } | 88 | } |
88 | 89 | ||
@@ -90,6 +91,7 @@ static int | |||
90 | nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) | 91 | nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) |
91 | { | 92 | { |
92 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 93 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
94 | struct nouveau_gpuobj *ramht = NULL; | ||
93 | struct nouveau_channel *chan; | 95 | struct nouveau_channel *chan; |
94 | int ret; | 96 | int ret; |
95 | 97 | ||
@@ -103,10 +105,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) | |||
103 | chan->user_get = 4; | 105 | chan->user_get = 4; |
104 | chan->user_put = 0; | 106 | chan->user_put = 0; |
105 | 107 | ||
106 | INIT_LIST_HEAD(&chan->ramht_refs); | 108 | ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, |
107 | 109 | NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); | |
108 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000, | ||
109 | NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); | ||
110 | if (ret) { | 110 | if (ret) { |
111 | NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); | 111 | NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); |
112 | nv50_evo_channel_del(pchan); | 112 | nv50_evo_channel_del(pchan); |
@@ -120,14 +120,20 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) | |||
120 | return ret; | 120 | return ret; |
121 | } | 121 | } |
122 | 122 | ||
123 | ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16, | 123 | ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); |
124 | 0, &chan->ramht); | ||
125 | if (ret) { | 124 | if (ret) { |
126 | NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); | 125 | NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); |
127 | nv50_evo_channel_del(pchan); | 126 | nv50_evo_channel_del(pchan); |
128 | return ret; | 127 | return ret; |
129 | } | 128 | } |
130 | 129 | ||
130 | ret = nouveau_ramht_new(dev, ramht, &chan->ramht); | ||
131 | nouveau_gpuobj_ref(NULL, &ramht); | ||
132 | if (ret) { | ||
133 | nv50_evo_channel_del(pchan); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
131 | if (dev_priv->chipset != 0x50) { | 137 | if (dev_priv->chipset != 0x50) { |
132 | ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, | 138 | ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, |
133 | 0, 0xffffffff); | 139 | 0, 0xffffffff); |
@@ -321,7 +327,7 @@ nv50_display_init(struct drm_device *dev) | |||
321 | } | 327 | } |
322 | } | 328 | } |
323 | 329 | ||
324 | nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9); | 330 | nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); |
325 | 331 | ||
326 | /* initialise fifo */ | 332 | /* initialise fifo */ |
327 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), | 333 | nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), |
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6bf025c6fc6f..6dcf048eddbc 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include "drmP.h" | 1 | #include "drmP.h" |
2 | #include "nouveau_drv.h" | 2 | #include "nouveau_drv.h" |
3 | #include "nouveau_dma.h" | 3 | #include "nouveau_dma.h" |
4 | #include "nouveau_ramht.h" | ||
4 | #include "nouveau_fbcon.h" | 5 | #include "nouveau_fbcon.h" |
5 | 6 | ||
6 | void | 7 | void |
@@ -193,7 +194,8 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
193 | if (ret) | 194 | if (ret) |
194 | return ret; | 195 | return ret; |
195 | 196 | ||
196 | ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL); | 197 | ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d); |
198 | nouveau_gpuobj_ref(NULL, &eng2d); | ||
197 | if (ret) | 199 | if (ret) |
198 | return ret; | 200 | return ret; |
199 | 201 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 9201f35d8277..4fc8b59cc41e 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
@@ -27,13 +27,14 @@ | |||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_ramht.h" | ||
30 | 31 | ||
31 | static void | 32 | static void |
32 | nv50_fifo_playlist_update(struct drm_device *dev) | 33 | nv50_fifo_playlist_update(struct drm_device *dev) |
33 | { | 34 | { |
34 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 35 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
35 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 36 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; |
36 | struct nouveau_gpuobj_ref *cur; | 37 | struct nouveau_gpuobj *cur; |
37 | int i, nr; | 38 | int i, nr; |
38 | 39 | ||
39 | NV_DEBUG(dev, "\n"); | 40 | NV_DEBUG(dev, "\n"); |
@@ -44,13 +45,13 @@ nv50_fifo_playlist_update(struct drm_device *dev) | |||
44 | /* We never schedule channel 0 or 127 */ | 45 | /* We never schedule channel 0 or 127 */ |
45 | for (i = 1, nr = 0; i < 127; i++) { | 46 | for (i = 1, nr = 0; i < 127; i++) { |
46 | if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { | 47 | if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { |
47 | nv_wo32(cur->gpuobj, (nr * 4), i); | 48 | nv_wo32(cur, (nr * 4), i); |
48 | nr++; | 49 | nr++; |
49 | } | 50 | } |
50 | } | 51 | } |
51 | dev_priv->engine.instmem.flush(dev); | 52 | dev_priv->engine.instmem.flush(dev); |
52 | 53 | ||
53 | nv_wr32(dev, 0x32f4, cur->instance >> 12); | 54 | nv_wr32(dev, 0x32f4, cur->vinst >> 12); |
54 | nv_wr32(dev, 0x32ec, nr); | 55 | nv_wr32(dev, 0x32ec, nr); |
55 | nv_wr32(dev, 0x2500, 0x101); | 56 | nv_wr32(dev, 0x2500, 0x101); |
56 | } | 57 | } |
@@ -65,9 +66,9 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel) | |||
65 | NV_DEBUG(dev, "ch%d\n", channel); | 66 | NV_DEBUG(dev, "ch%d\n", channel); |
66 | 67 | ||
67 | if (dev_priv->chipset == 0x50) | 68 | if (dev_priv->chipset == 0x50) |
68 | inst = chan->ramfc->instance >> 12; | 69 | inst = chan->ramfc->vinst >> 12; |
69 | else | 70 | else |
70 | inst = chan->ramfc->instance >> 8; | 71 | inst = chan->ramfc->vinst >> 8; |
71 | 72 | ||
72 | nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | | 73 | nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | |
73 | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); | 74 | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); |
@@ -165,19 +166,19 @@ nv50_fifo_init(struct drm_device *dev) | |||
165 | goto just_reset; | 166 | goto just_reset; |
166 | } | 167 | } |
167 | 168 | ||
168 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, | 169 | ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, |
169 | NVOBJ_FLAG_ZERO_ALLOC, | 170 | NVOBJ_FLAG_ZERO_ALLOC, |
170 | &pfifo->playlist[0]); | 171 | &pfifo->playlist[0]); |
171 | if (ret) { | 172 | if (ret) { |
172 | NV_ERROR(dev, "error creating playlist 0: %d\n", ret); | 173 | NV_ERROR(dev, "error creating playlist 0: %d\n", ret); |
173 | return ret; | 174 | return ret; |
174 | } | 175 | } |
175 | 176 | ||
176 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, | 177 | ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, |
177 | NVOBJ_FLAG_ZERO_ALLOC, | 178 | NVOBJ_FLAG_ZERO_ALLOC, |
178 | &pfifo->playlist[1]); | 179 | &pfifo->playlist[1]); |
179 | if (ret) { | 180 | if (ret) { |
180 | nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); | 181 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); |
181 | NV_ERROR(dev, "error creating playlist 1: %d\n", ret); | 182 | NV_ERROR(dev, "error creating playlist 1: %d\n", ret); |
182 | return ret; | 183 | return ret; |
183 | } | 184 | } |
@@ -205,8 +206,8 @@ nv50_fifo_takedown(struct drm_device *dev) | |||
205 | if (!pfifo->playlist[0]) | 206 | if (!pfifo->playlist[0]) |
206 | return; | 207 | return; |
207 | 208 | ||
208 | nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); | 209 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); |
209 | nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]); | 210 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); |
210 | } | 211 | } |
211 | 212 | ||
212 | int | 213 | int |
@@ -228,42 +229,39 @@ nv50_fifo_create_context(struct nouveau_channel *chan) | |||
228 | NV_DEBUG(dev, "ch%d\n", chan->id); | 229 | NV_DEBUG(dev, "ch%d\n", chan->id); |
229 | 230 | ||
230 | if (dev_priv->chipset == 0x50) { | 231 | if (dev_priv->chipset == 0x50) { |
231 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->gpuobj->pinst, | 232 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, |
232 | chan->ramin->gpuobj->vinst, 0x100, | 233 | chan->ramin->vinst, 0x100, |
233 | NVOBJ_FLAG_ZERO_ALLOC | | 234 | NVOBJ_FLAG_ZERO_ALLOC | |
234 | NVOBJ_FLAG_ZERO_FREE, &ramfc, | 235 | NVOBJ_FLAG_ZERO_FREE, |
235 | &chan->ramfc); | 236 | &chan->ramfc); |
236 | if (ret) | 237 | if (ret) |
237 | return ret; | 238 | return ret; |
238 | 239 | ||
239 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->gpuobj->pinst + | 240 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400, |
240 | 0x0400, | 241 | chan->ramin->vinst + 0x0400, |
241 | chan->ramin->gpuobj->vinst + | 242 | 4096, 0, &chan->cache); |
242 | 0x0400, 4096, 0, NULL, | ||
243 | &chan->cache); | ||
244 | if (ret) | 243 | if (ret) |
245 | return ret; | 244 | return ret; |
246 | } else { | 245 | } else { |
247 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, | 246 | ret = nouveau_gpuobj_new(dev, chan, 0x100, 256, |
248 | NVOBJ_FLAG_ZERO_ALLOC | | 247 | NVOBJ_FLAG_ZERO_ALLOC | |
249 | NVOBJ_FLAG_ZERO_FREE, | 248 | NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); |
250 | &chan->ramfc); | ||
251 | if (ret) | 249 | if (ret) |
252 | return ret; | 250 | return ret; |
253 | ramfc = chan->ramfc->gpuobj; | ||
254 | 251 | ||
255 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024, | 252 | ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, |
256 | 0, &chan->cache); | 253 | 0, &chan->cache); |
257 | if (ret) | 254 | if (ret) |
258 | return ret; | 255 | return ret; |
259 | } | 256 | } |
257 | ramfc = chan->ramfc; | ||
260 | 258 | ||
261 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 259 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
262 | 260 | ||
263 | nv_wo32(ramfc, 0x48, chan->pushbuf->instance >> 4); | 261 | nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); |
264 | nv_wo32(ramfc, 0x80, (0 << 27) /* 4KiB */ | | 262 | nv_wo32(ramfc, 0x80, (0 << 27) /* 4KiB */ | |
265 | (4 << 24) /* SEARCH_FULL */ | | 263 | (4 << 24) /* SEARCH_FULL */ | |
266 | (chan->ramht->instance >> 4)); | 264 | (chan->ramht->gpuobj->cinst >> 4)); |
267 | nv_wo32(ramfc, 0x44, 0x2101ffff); | 265 | nv_wo32(ramfc, 0x44, 0x2101ffff); |
268 | nv_wo32(ramfc, 0x60, 0x7fffffff); | 266 | nv_wo32(ramfc, 0x60, 0x7fffffff); |
269 | nv_wo32(ramfc, 0x40, 0x00000000); | 267 | nv_wo32(ramfc, 0x40, 0x00000000); |
@@ -274,11 +272,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan) | |||
274 | nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16); | 272 | nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16); |
275 | 273 | ||
276 | if (dev_priv->chipset != 0x50) { | 274 | if (dev_priv->chipset != 0x50) { |
277 | nv_wo32(chan->ramin->gpuobj, 0, chan->id); | 275 | nv_wo32(chan->ramin, 0, chan->id); |
278 | nv_wo32(chan->ramin->gpuobj, 4, chan->ramfc->instance >> 8); | 276 | nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8); |
279 | 277 | ||
280 | nv_wo32(ramfc, 0x88, chan->cache->instance >> 10); | 278 | nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10); |
281 | nv_wo32(ramfc, 0x98, chan->ramin->instance >> 12); | 279 | nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12); |
282 | } | 280 | } |
283 | 281 | ||
284 | dev_priv->engine.instmem.flush(dev); | 282 | dev_priv->engine.instmem.flush(dev); |
@@ -293,12 +291,13 @@ void | |||
293 | nv50_fifo_destroy_context(struct nouveau_channel *chan) | 291 | nv50_fifo_destroy_context(struct nouveau_channel *chan) |
294 | { | 292 | { |
295 | struct drm_device *dev = chan->dev; | 293 | struct drm_device *dev = chan->dev; |
296 | struct nouveau_gpuobj_ref *ramfc = chan->ramfc; | 294 | struct nouveau_gpuobj *ramfc = NULL; |
297 | 295 | ||
298 | NV_DEBUG(dev, "ch%d\n", chan->id); | 296 | NV_DEBUG(dev, "ch%d\n", chan->id); |
299 | 297 | ||
300 | /* This will ensure the channel is seen as disabled. */ | 298 | /* This will ensure the channel is seen as disabled. */ |
301 | chan->ramfc = NULL; | 299 | nouveau_gpuobj_ref(chan->ramfc, &ramfc); |
300 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | ||
302 | nv50_fifo_channel_disable(dev, chan->id); | 301 | nv50_fifo_channel_disable(dev, chan->id); |
303 | 302 | ||
304 | /* Dummy channel, also used on ch 127 */ | 303 | /* Dummy channel, also used on ch 127 */ |
@@ -306,8 +305,8 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) | |||
306 | nv50_fifo_channel_disable(dev, 127); | 305 | nv50_fifo_channel_disable(dev, 127); |
307 | nv50_fifo_playlist_update(dev); | 306 | nv50_fifo_playlist_update(dev); |
308 | 307 | ||
309 | nouveau_gpuobj_ref_del(dev, &ramfc); | 308 | nouveau_gpuobj_ref(NULL, &ramfc); |
310 | nouveau_gpuobj_ref_del(dev, &chan->cache); | 309 | nouveau_gpuobj_ref(NULL, &chan->cache); |
311 | } | 310 | } |
312 | 311 | ||
313 | int | 312 | int |
@@ -315,8 +314,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan) | |||
315 | { | 314 | { |
316 | struct drm_device *dev = chan->dev; | 315 | struct drm_device *dev = chan->dev; |
317 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 316 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
318 | struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; | 317 | struct nouveau_gpuobj *ramfc = chan->ramfc; |
319 | struct nouveau_gpuobj *cache = chan->cache->gpuobj; | 318 | struct nouveau_gpuobj *cache = chan->cache; |
320 | int ptr, cnt; | 319 | int ptr, cnt; |
321 | 320 | ||
322 | NV_DEBUG(dev, "ch%d\n", chan->id); | 321 | NV_DEBUG(dev, "ch%d\n", chan->id); |
@@ -399,8 +398,8 @@ nv50_fifo_unload_context(struct drm_device *dev) | |||
399 | return -EINVAL; | 398 | return -EINVAL; |
400 | } | 399 | } |
401 | NV_DEBUG(dev, "ch%d\n", chan->id); | 400 | NV_DEBUG(dev, "ch%d\n", chan->id); |
402 | ramfc = chan->ramfc->gpuobj; | 401 | ramfc = chan->ramfc; |
403 | cache = chan->cache->gpuobj; | 402 | cache = chan->cache; |
404 | 403 | ||
405 | nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330)); | 404 | nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330)); |
406 | nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334)); | 405 | nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334)); |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 17a8d788a494..7db0d627c1b9 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | 30 | #include "nouveau_ramht.h" | |
31 | #include "nouveau_grctx.h" | 31 | #include "nouveau_grctx.h" |
32 | 32 | ||
33 | static void | 33 | static void |
@@ -192,7 +192,7 @@ nv50_graph_channel(struct drm_device *dev) | |||
192 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 192 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
193 | struct nouveau_channel *chan = dev_priv->fifos[i]; | 193 | struct nouveau_channel *chan = dev_priv->fifos[i]; |
194 | 194 | ||
195 | if (chan && chan->ramin && chan->ramin->instance == inst) | 195 | if (chan && chan->ramin && chan->ramin->vinst == inst) |
196 | return chan; | 196 | return chan; |
197 | } | 197 | } |
198 | 198 | ||
@@ -204,36 +204,34 @@ nv50_graph_create_context(struct nouveau_channel *chan) | |||
204 | { | 204 | { |
205 | struct drm_device *dev = chan->dev; | 205 | struct drm_device *dev = chan->dev; |
206 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 206 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
207 | struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; | 207 | struct nouveau_gpuobj *ramin = chan->ramin; |
208 | struct nouveau_gpuobj *obj; | ||
209 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | 208 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
210 | struct nouveau_grctx ctx = {}; | 209 | struct nouveau_grctx ctx = {}; |
211 | int hdr, ret; | 210 | int hdr, ret; |
212 | 211 | ||
213 | NV_DEBUG(dev, "ch%d\n", chan->id); | 212 | NV_DEBUG(dev, "ch%d\n", chan->id); |
214 | 213 | ||
215 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, | 214 | ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, |
216 | 0x1000, NVOBJ_FLAG_ZERO_ALLOC | | 215 | NVOBJ_FLAG_ZERO_ALLOC | |
217 | NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); | 216 | NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); |
218 | if (ret) | 217 | if (ret) |
219 | return ret; | 218 | return ret; |
220 | obj = chan->ramin_grctx->gpuobj; | ||
221 | 219 | ||
222 | hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; | 220 | hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; |
223 | nv_wo32(ramin, hdr + 0x00, 0x00190002); | 221 | nv_wo32(ramin, hdr + 0x00, 0x00190002); |
224 | nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->instance + | 222 | nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst + |
225 | pgraph->grctx_size - 1); | 223 | pgraph->grctx_size - 1); |
226 | nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->instance); | 224 | nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst); |
227 | nv_wo32(ramin, hdr + 0x0c, 0); | 225 | nv_wo32(ramin, hdr + 0x0c, 0); |
228 | nv_wo32(ramin, hdr + 0x10, 0); | 226 | nv_wo32(ramin, hdr + 0x10, 0); |
229 | nv_wo32(ramin, hdr + 0x14, 0x00010000); | 227 | nv_wo32(ramin, hdr + 0x14, 0x00010000); |
230 | 228 | ||
231 | ctx.dev = chan->dev; | 229 | ctx.dev = chan->dev; |
232 | ctx.mode = NOUVEAU_GRCTX_VALS; | 230 | ctx.mode = NOUVEAU_GRCTX_VALS; |
233 | ctx.data = obj; | 231 | ctx.data = chan->ramin_grctx; |
234 | nv50_grctx_init(&ctx); | 232 | nv50_grctx_init(&ctx); |
235 | 233 | ||
236 | nv_wo32(obj, 0x00000, chan->ramin->instance >> 12); | 234 | nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); |
237 | 235 | ||
238 | dev_priv->engine.instmem.flush(dev); | 236 | dev_priv->engine.instmem.flush(dev); |
239 | return 0; | 237 | return 0; |
@@ -248,14 +246,14 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) | |||
248 | 246 | ||
249 | NV_DEBUG(dev, "ch%d\n", chan->id); | 247 | NV_DEBUG(dev, "ch%d\n", chan->id); |
250 | 248 | ||
251 | if (!chan->ramin || !chan->ramin->gpuobj) | 249 | if (!chan->ramin) |
252 | return; | 250 | return; |
253 | 251 | ||
254 | for (i = hdr; i < hdr + 24; i += 4) | 252 | for (i = hdr; i < hdr + 24; i += 4) |
255 | nv_wo32(chan->ramin->gpuobj, i, 0); | 253 | nv_wo32(chan->ramin, i, 0); |
256 | dev_priv->engine.instmem.flush(dev); | 254 | dev_priv->engine.instmem.flush(dev); |
257 | 255 | ||
258 | nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); | 256 | nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); |
259 | } | 257 | } |
260 | 258 | ||
261 | static int | 259 | static int |
@@ -282,7 +280,7 @@ nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst) | |||
282 | int | 280 | int |
283 | nv50_graph_load_context(struct nouveau_channel *chan) | 281 | nv50_graph_load_context(struct nouveau_channel *chan) |
284 | { | 282 | { |
285 | uint32_t inst = chan->ramin->instance >> 12; | 283 | uint32_t inst = chan->ramin->vinst >> 12; |
286 | 284 | ||
287 | NV_DEBUG(chan->dev, "ch%d\n", chan->id); | 285 | NV_DEBUG(chan->dev, "ch%d\n", chan->id); |
288 | return nv50_graph_do_load_context(chan->dev, inst); | 286 | return nv50_graph_do_load_context(chan->dev, inst); |
@@ -327,15 +325,16 @@ static int | |||
327 | nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, | 325 | nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, |
328 | int mthd, uint32_t data) | 326 | int mthd, uint32_t data) |
329 | { | 327 | { |
330 | struct nouveau_gpuobj_ref *ref = NULL; | 328 | struct nouveau_gpuobj *gpuobj; |
331 | 329 | ||
332 | if (nouveau_gpuobj_ref_find(chan, data, &ref)) | 330 | gpuobj = nouveau_ramht_find(chan, data); |
331 | if (!gpuobj) | ||
333 | return -ENOENT; | 332 | return -ENOENT; |
334 | 333 | ||
335 | if (nouveau_notifier_offset(ref->gpuobj, NULL)) | 334 | if (nouveau_notifier_offset(gpuobj, NULL)) |
336 | return -EINVAL; | 335 | return -EINVAL; |
337 | 336 | ||
338 | chan->nvsw.vblsem = ref->gpuobj; | 337 | chan->nvsw.vblsem = gpuobj; |
339 | chan->nvsw.vblsem_offset = ~0; | 338 | chan->nvsw.vblsem_offset = ~0; |
340 | return 0; | 339 | return 0; |
341 | } | 340 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 821806c835ba..0af0baf4f1a9 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -32,9 +32,9 @@ | |||
32 | struct nv50_instmem_priv { | 32 | struct nv50_instmem_priv { |
33 | uint32_t save1700[5]; /* 0x1700->0x1710 */ | 33 | uint32_t save1700[5]; /* 0x1700->0x1710 */ |
34 | 34 | ||
35 | struct nouveau_gpuobj_ref *pramin_pt; | 35 | struct nouveau_gpuobj *pramin_pt; |
36 | struct nouveau_gpuobj_ref *pramin_bar; | 36 | struct nouveau_gpuobj *pramin_bar; |
37 | struct nouveau_gpuobj_ref *fb_bar; | 37 | struct nouveau_gpuobj *fb_bar; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | #define NV50_INSTMEM_PAGE_SHIFT 12 | 40 | #define NV50_INSTMEM_PAGE_SHIFT 12 |
@@ -44,15 +44,8 @@ struct nv50_instmem_priv { | |||
44 | /*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN | 44 | /*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN |
45 | */ | 45 | */ |
46 | #define BAR0_WI32(g, o, v) do { \ | 46 | #define BAR0_WI32(g, o, v) do { \ |
47 | uint32_t offset; \ | 47 | u32 offset = (g)->vinst + (o); \ |
48 | if ((g)->im_backing) { \ | 48 | nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \ |
49 | offset = (g)->im_backing_start; \ | ||
50 | } else { \ | ||
51 | offset = chan->ramin->gpuobj->im_backing_start; \ | ||
52 | offset += (g)->im_pramin->start; \ | ||
53 | } \ | ||
54 | offset += (o); \ | ||
55 | nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \ | ||
56 | } while (0) | 49 | } while (0) |
57 | 50 | ||
58 | int | 51 | int |
@@ -142,8 +135,7 @@ nv50_instmem_init(struct drm_device *dev) | |||
142 | INIT_LIST_HEAD(&chan->ramht_refs); | 135 | INIT_LIST_HEAD(&chan->ramht_refs); |
143 | 136 | ||
144 | /* Channel's PRAMIN object + heap */ | 137 | /* Channel's PRAMIN object + heap */ |
145 | ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, | 138 | ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, &chan->ramin); |
146 | NULL, &chan->ramin); | ||
147 | if (ret) | 139 | if (ret) |
148 | return ret; | 140 | return ret; |
149 | 141 | ||
@@ -152,16 +144,16 @@ nv50_instmem_init(struct drm_device *dev) | |||
152 | 144 | ||
153 | /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ | 145 | /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ |
154 | ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, | 146 | ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, |
155 | 0x4000, 0, NULL, &chan->ramfc); | 147 | 0x4000, 0, &chan->ramfc); |
156 | if (ret) | 148 | if (ret) |
157 | return ret; | 149 | return ret; |
158 | 150 | ||
159 | for (i = 0; i < c_vmpd; i += 4) | 151 | for (i = 0; i < c_vmpd; i += 4) |
160 | BAR0_WI32(chan->ramin->gpuobj, i, 0); | 152 | BAR0_WI32(chan->ramin, i, 0); |
161 | 153 | ||
162 | /* VM page directory */ | 154 | /* VM page directory */ |
163 | ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, | 155 | ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, |
164 | 0x4000, 0, &chan->vm_pd, NULL); | 156 | 0x4000, 0, &chan->vm_pd); |
165 | if (ret) | 157 | if (ret) |
166 | return ret; | 158 | return ret; |
167 | for (i = 0; i < 0x4000; i += 8) { | 159 | for (i = 0; i < 0x4000; i += 8) { |
@@ -172,8 +164,8 @@ nv50_instmem_init(struct drm_device *dev) | |||
172 | /* PRAMIN page table, cheat and map into VM at 0x0000000000. | 164 | /* PRAMIN page table, cheat and map into VM at 0x0000000000. |
173 | * We map the entire fake channel into the start of the PRAMIN BAR | 165 | * We map the entire fake channel into the start of the PRAMIN BAR |
174 | */ | 166 | */ |
175 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, | 167 | ret = nouveau_gpuobj_new(dev, chan, pt_size, 0x1000, 0, |
176 | 0, &priv->pramin_pt); | 168 | &priv->pramin_pt); |
177 | if (ret) | 169 | if (ret) |
178 | return ret; | 170 | return ret; |
179 | 171 | ||
@@ -185,76 +177,74 @@ nv50_instmem_init(struct drm_device *dev) | |||
185 | 177 | ||
186 | i = 0; | 178 | i = 0; |
187 | while (v < dev_priv->vram_sys_base + c_offset + c_size) { | 179 | while (v < dev_priv->vram_sys_base + c_offset + c_size) { |
188 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); | 180 | BAR0_WI32(priv->pramin_pt, i + 0, lower_32_bits(v)); |
189 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); | 181 | BAR0_WI32(priv->pramin_pt, i + 4, upper_32_bits(v)); |
190 | v += 0x1000; | 182 | v += 0x1000; |
191 | i += 8; | 183 | i += 8; |
192 | } | 184 | } |
193 | 185 | ||
194 | while (i < pt_size) { | 186 | while (i < pt_size) { |
195 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000); | 187 | BAR0_WI32(priv->pramin_pt, i + 0, 0x00000000); |
196 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); | 188 | BAR0_WI32(priv->pramin_pt, i + 4, 0x00000000); |
197 | i += 8; | 189 | i += 8; |
198 | } | 190 | } |
199 | 191 | ||
200 | BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); | 192 | BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->vinst | 0x63); |
201 | BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); | 193 | BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); |
202 | 194 | ||
203 | /* VRAM page table(s), mapped into VM at +1GiB */ | 195 | /* VRAM page table(s), mapped into VM at +1GiB */ |
204 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { | 196 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { |
205 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, | 197 | ret = nouveau_gpuobj_new(dev, chan, NV50_VM_BLOCK / 0x10000 * 8, |
206 | NV50_VM_BLOCK/65536*8, 0, 0, | 198 | 0, 0, &chan->vm_vram_pt[i]); |
207 | &chan->vm_vram_pt[i]); | ||
208 | if (ret) { | 199 | if (ret) { |
209 | NV_ERROR(dev, "Error creating VRAM page tables: %d\n", | 200 | NV_ERROR(dev, "Error creating VRAM page tables: %d\n", |
210 | ret); | 201 | ret); |
211 | dev_priv->vm_vram_pt_nr = i; | 202 | dev_priv->vm_vram_pt_nr = i; |
212 | return ret; | 203 | return ret; |
213 | } | 204 | } |
214 | dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj; | 205 | /*XXX: double-check this is ok */ |
206 | dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]; | ||
215 | 207 | ||
216 | for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size; | 208 | for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size; |
217 | v += 4) | 209 | v += 4) |
218 | BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0); | 210 | BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0); |
219 | 211 | ||
220 | BAR0_WI32(chan->vm_pd, 0x10 + (i*8), | 212 | BAR0_WI32(chan->vm_pd, 0x10 + (i*8), |
221 | chan->vm_vram_pt[i]->instance | 0x61); | 213 | chan->vm_vram_pt[i]->vinst | 0x61); |
222 | BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0); | 214 | BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0); |
223 | } | 215 | } |
224 | 216 | ||
225 | /* DMA object for PRAMIN BAR */ | 217 | /* DMA object for PRAMIN BAR */ |
226 | ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, | 218 | ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar); |
227 | &priv->pramin_bar); | ||
228 | if (ret) | 219 | if (ret) |
229 | return ret; | 220 | return ret; |
230 | BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); | 221 | BAR0_WI32(priv->pramin_bar, 0x00, 0x7fc00000); |
231 | BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1); | 222 | BAR0_WI32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); |
232 | BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); | 223 | BAR0_WI32(priv->pramin_bar, 0x08, 0x00000000); |
233 | BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); | 224 | BAR0_WI32(priv->pramin_bar, 0x0c, 0x00000000); |
234 | BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); | 225 | BAR0_WI32(priv->pramin_bar, 0x10, 0x00000000); |
235 | BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); | 226 | BAR0_WI32(priv->pramin_bar, 0x14, 0x00000000); |
236 | 227 | ||
237 | /* DMA object for FB BAR */ | 228 | /* DMA object for FB BAR */ |
238 | ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, | 229 | ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); |
239 | &priv->fb_bar); | ||
240 | if (ret) | 230 | if (ret) |
241 | return ret; | 231 | return ret; |
242 | BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000); | 232 | BAR0_WI32(priv->fb_bar, 0x00, 0x7fc00000); |
243 | BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 + | 233 | BAR0_WI32(priv->fb_bar, 0x04, 0x40000000 + |
244 | pci_resource_len(dev->pdev, 1) - 1); | 234 | pci_resource_len(dev->pdev, 1) - 1); |
245 | BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000); | 235 | BAR0_WI32(priv->fb_bar, 0x08, 0x40000000); |
246 | BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000); | 236 | BAR0_WI32(priv->fb_bar, 0x0c, 0x00000000); |
247 | BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000); | 237 | BAR0_WI32(priv->fb_bar, 0x10, 0x00000000); |
248 | BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000); | 238 | BAR0_WI32(priv->fb_bar, 0x14, 0x00000000); |
249 | 239 | ||
250 | /* Poke the relevant regs, and pray it works :) */ | 240 | /* Poke the relevant regs, and pray it works :) */ |
251 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); | 241 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); |
252 | nv_wr32(dev, NV50_PUNK_UNK1710, 0); | 242 | nv_wr32(dev, NV50_PUNK_UNK1710, 0); |
253 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | | 243 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | |
254 | NV50_PUNK_BAR_CFG_BASE_VALID); | 244 | NV50_PUNK_BAR_CFG_BASE_VALID); |
255 | nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | | 245 | nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | |
256 | NV50_PUNK_BAR1_CTXDMA_VALID); | 246 | NV50_PUNK_BAR1_CTXDMA_VALID); |
257 | nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | | 247 | nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | |
258 | NV50_PUNK_BAR3_CTXDMA_VALID); | 248 | NV50_PUNK_BAR3_CTXDMA_VALID); |
259 | 249 | ||
260 | for (i = 0; i < 8; i++) | 250 | for (i = 0; i < 8; i++) |
@@ -301,21 +291,19 @@ nv50_instmem_takedown(struct drm_device *dev) | |||
301 | for (i = 0x1700; i <= 0x1710; i += 4) | 291 | for (i = 0x1700; i <= 0x1710; i += 4) |
302 | nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); | 292 | nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); |
303 | 293 | ||
304 | nouveau_gpuobj_ref_del(dev, &priv->fb_bar); | 294 | nouveau_gpuobj_ref(NULL, &priv->fb_bar); |
305 | nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); | 295 | nouveau_gpuobj_ref(NULL, &priv->pramin_bar); |
306 | nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); | 296 | nouveau_gpuobj_ref(NULL, &priv->pramin_pt); |
307 | 297 | ||
308 | /* Destroy dummy channel */ | 298 | /* Destroy dummy channel */ |
309 | if (chan) { | 299 | if (chan) { |
310 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { | 300 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) |
311 | nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); | 301 | nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); |
312 | dev_priv->vm_vram_pt[i] = NULL; | ||
313 | } | ||
314 | dev_priv->vm_vram_pt_nr = 0; | 302 | dev_priv->vm_vram_pt_nr = 0; |
315 | 303 | ||
316 | nouveau_gpuobj_del(dev, &chan->vm_pd); | 304 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); |
317 | nouveau_gpuobj_ref_del(dev, &chan->ramfc); | 305 | nouveau_gpuobj_ref(NULL, &chan->ramfc); |
318 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | 306 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
319 | drm_mm_takedown(&chan->ramin_heap); | 307 | drm_mm_takedown(&chan->ramin_heap); |
320 | 308 | ||
321 | dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; | 309 | dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; |
@@ -331,7 +319,7 @@ nv50_instmem_suspend(struct drm_device *dev) | |||
331 | { | 319 | { |
332 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 320 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
333 | struct nouveau_channel *chan = dev_priv->fifos[0]; | 321 | struct nouveau_channel *chan = dev_priv->fifos[0]; |
334 | struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; | 322 | struct nouveau_gpuobj *ramin = chan->ramin; |
335 | int i; | 323 | int i; |
336 | 324 | ||
337 | ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size); | 325 | ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size); |
@@ -349,7 +337,7 @@ nv50_instmem_resume(struct drm_device *dev) | |||
349 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 337 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
350 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | 338 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; |
351 | struct nouveau_channel *chan = dev_priv->fifos[0]; | 339 | struct nouveau_channel *chan = dev_priv->fifos[0]; |
352 | struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; | 340 | struct nouveau_gpuobj *ramin = chan->ramin; |
353 | int i; | 341 | int i; |
354 | 342 | ||
355 | nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16)); | 343 | nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16)); |
@@ -359,13 +347,13 @@ nv50_instmem_resume(struct drm_device *dev) | |||
359 | ramin->im_backing_suspend = NULL; | 347 | ramin->im_backing_suspend = NULL; |
360 | 348 | ||
361 | /* Poke the relevant regs, and pray it works :) */ | 349 | /* Poke the relevant regs, and pray it works :) */ |
362 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); | 350 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); |
363 | nv_wr32(dev, NV50_PUNK_UNK1710, 0); | 351 | nv_wr32(dev, NV50_PUNK_UNK1710, 0); |
364 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | | 352 | nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | |
365 | NV50_PUNK_BAR_CFG_BASE_VALID); | 353 | NV50_PUNK_BAR_CFG_BASE_VALID); |
366 | nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | | 354 | nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | |
367 | NV50_PUNK_BAR1_CTXDMA_VALID); | 355 | NV50_PUNK_BAR1_CTXDMA_VALID); |
368 | nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | | 356 | nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | |
369 | NV50_PUNK_BAR3_CTXDMA_VALID); | 357 | NV50_PUNK_BAR3_CTXDMA_VALID); |
370 | 358 | ||
371 | for (i = 0; i < 8; i++) | 359 | for (i = 0; i < 8; i++) |
@@ -424,7 +412,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
424 | { | 412 | { |
425 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 413 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
426 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | 414 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; |
427 | struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; | 415 | struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; |
428 | uint32_t pte, pte_end; | 416 | uint32_t pte, pte_end; |
429 | uint64_t vram; | 417 | uint64_t vram; |
430 | 418 | ||
@@ -477,8 +465,8 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
477 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; | 465 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; |
478 | 466 | ||
479 | while (pte < pte_end) { | 467 | while (pte < pte_end) { |
480 | nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 0, 0x00000000); | 468 | nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); |
481 | nv_wo32(priv->pramin_pt->gpuobj, (pte * 4) + 4, 0x00000000); | 469 | nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); |
482 | pte += 2; | 470 | pte += 2; |
483 | } | 471 | } |
484 | dev_priv->engine.instmem.flush(dev); | 472 | dev_priv->engine.instmem.flush(dev); |