diff options
author | Ben Skeggs <bskeggs@redhat.com> | 2010-09-01 01:24:31 -0400 |
---|---|---|
committer | Ben Skeggs <bskeggs@redhat.com> | 2010-09-24 02:20:14 -0400 |
commit | a8eaebc6c52bb0cd243b4cb421068f42d378be9c (patch) | |
tree | 12f796e5210d51f78b9fc6ddd4750cf1421373c2 /drivers/gpu/drm/nouveau/nouveau_object.c | |
parent | de3a6c0a3b642c0c350414d63298a1b19a009290 (diff) |
drm/nouveau: remove nouveau_gpuobj_ref completely, replace with sanity
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_object.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_object.c | 358 |
1 file changed, 91 insertions, 267 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 552f5131650f..d55c50f1a2d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -90,7 +90,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
90 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | 90 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); |
91 | gpuobj->dev = dev; | 91 | gpuobj->dev = dev; |
92 | gpuobj->flags = flags; | 92 | gpuobj->flags = flags; |
93 | gpuobj->im_channel = chan; | 93 | gpuobj->refcount = 1; |
94 | 94 | ||
95 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | 95 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); |
96 | 96 | ||
@@ -108,7 +108,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
108 | 108 | ||
109 | ret = engine->instmem.populate(dev, gpuobj, &size); | 109 | ret = engine->instmem.populate(dev, gpuobj, &size); |
110 | if (ret) { | 110 | if (ret) { |
111 | nouveau_gpuobj_del(dev, &gpuobj); | 111 | nouveau_gpuobj_ref(NULL, &gpuobj); |
112 | return ret; | 112 | return ret; |
113 | } | 113 | } |
114 | } | 114 | } |
@@ -119,14 +119,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
119 | gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); | 119 | gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); |
120 | 120 | ||
121 | if (!gpuobj->im_pramin) { | 121 | if (!gpuobj->im_pramin) { |
122 | nouveau_gpuobj_del(dev, &gpuobj); | 122 | nouveau_gpuobj_ref(NULL, &gpuobj); |
123 | return -ENOMEM; | 123 | return -ENOMEM; |
124 | } | 124 | } |
125 | 125 | ||
126 | if (!chan) { | 126 | if (!chan) { |
127 | ret = engine->instmem.bind(dev, gpuobj); | 127 | ret = engine->instmem.bind(dev, gpuobj); |
128 | if (ret) { | 128 | if (ret) { |
129 | nouveau_gpuobj_del(dev, &gpuobj); | 129 | nouveau_gpuobj_ref(NULL, &gpuobj); |
130 | return ret; | 130 | return ret; |
131 | } | 131 | } |
132 | } | 132 | } |
@@ -134,13 +134,13 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
134 | /* calculate the various different addresses for the object */ | 134 | /* calculate the various different addresses for the object */ |
135 | if (chan) { | 135 | if (chan) { |
136 | gpuobj->pinst = gpuobj->im_pramin->start + | 136 | gpuobj->pinst = gpuobj->im_pramin->start + |
137 | chan->ramin->gpuobj->im_pramin->start; | 137 | chan->ramin->im_pramin->start; |
138 | if (dev_priv->card_type < NV_50) { | 138 | if (dev_priv->card_type < NV_50) { |
139 | gpuobj->cinst = gpuobj->pinst; | 139 | gpuobj->cinst = gpuobj->pinst; |
140 | } else { | 140 | } else { |
141 | gpuobj->cinst = gpuobj->im_pramin->start; | 141 | gpuobj->cinst = gpuobj->im_pramin->start; |
142 | gpuobj->vinst = gpuobj->im_pramin->start + | 142 | gpuobj->vinst = gpuobj->im_pramin->start + |
143 | chan->ramin->gpuobj->im_backing_start; | 143 | chan->ramin->im_backing_start; |
144 | } | 144 | } |
145 | } else { | 145 | } else { |
146 | gpuobj->pinst = gpuobj->im_pramin->start; | 146 | gpuobj->pinst = gpuobj->im_pramin->start; |
@@ -156,6 +156,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
156 | engine->instmem.flush(dev); | 156 | engine->instmem.flush(dev); |
157 | } | 157 | } |
158 | 158 | ||
159 | |||
159 | *gpuobj_ret = gpuobj; | 160 | *gpuobj_ret = gpuobj; |
160 | return 0; | 161 | return 0; |
161 | } | 162 | } |
@@ -176,20 +177,23 @@ int | |||
176 | nouveau_gpuobj_init(struct drm_device *dev) | 177 | nouveau_gpuobj_init(struct drm_device *dev) |
177 | { | 178 | { |
178 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 179 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
180 | struct nouveau_gpuobj *ramht = NULL; | ||
179 | int ret; | 181 | int ret; |
180 | 182 | ||
181 | NV_DEBUG(dev, "\n"); | 183 | NV_DEBUG(dev, "\n"); |
182 | 184 | ||
183 | if (dev_priv->card_type < NV_50) { | 185 | if (dev_priv->card_type >= NV_50) |
184 | ret = nouveau_gpuobj_new_fake(dev, | 186 | return 0; |
185 | dev_priv->ramht_offset, ~0, dev_priv->ramht_size, | ||
186 | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, | ||
187 | &dev_priv->ramht, NULL); | ||
188 | if (ret) | ||
189 | return ret; | ||
190 | } | ||
191 | 187 | ||
192 | return 0; | 188 | ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0, |
189 | dev_priv->ramht_size, | ||
190 | NVOBJ_FLAG_ZERO_ALLOC, &ramht); | ||
191 | if (ret) | ||
192 | return ret; | ||
193 | |||
194 | ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht); | ||
195 | nouveau_gpuobj_ref(NULL, &ramht); | ||
196 | return ret; | ||
193 | } | 197 | } |
194 | 198 | ||
195 | void | 199 | void |
@@ -199,7 +203,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev) | |||
199 | 203 | ||
200 | NV_DEBUG(dev, "\n"); | 204 | NV_DEBUG(dev, "\n"); |
201 | 205 | ||
202 | nouveau_gpuobj_del(dev, &dev_priv->ramht); | 206 | nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL); |
203 | } | 207 | } |
204 | 208 | ||
205 | void | 209 | void |
@@ -216,29 +220,21 @@ nouveau_gpuobj_late_takedown(struct drm_device *dev) | |||
216 | 220 | ||
217 | NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", | 221 | NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", |
218 | gpuobj, gpuobj->refcount); | 222 | gpuobj, gpuobj->refcount); |
219 | gpuobj->refcount = 0; | 223 | |
220 | nouveau_gpuobj_del(dev, &gpuobj); | 224 | gpuobj->refcount = 1; |
225 | nouveau_gpuobj_ref(NULL, &gpuobj); | ||
221 | } | 226 | } |
222 | } | 227 | } |
223 | 228 | ||
224 | int | 229 | static int |
225 | nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) | 230 | nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj) |
226 | { | 231 | { |
232 | struct drm_device *dev = gpuobj->dev; | ||
227 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 233 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
228 | struct nouveau_engine *engine = &dev_priv->engine; | 234 | struct nouveau_engine *engine = &dev_priv->engine; |
229 | struct nouveau_gpuobj *gpuobj; | ||
230 | int i; | 235 | int i; |
231 | 236 | ||
232 | NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); | 237 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); |
233 | |||
234 | if (!dev_priv || !pgpuobj || !(*pgpuobj)) | ||
235 | return -EINVAL; | ||
236 | gpuobj = *pgpuobj; | ||
237 | |||
238 | if (gpuobj->refcount != 0) { | ||
239 | NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount); | ||
240 | return -EINVAL; | ||
241 | } | ||
242 | 238 | ||
243 | if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { | 239 | if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { |
244 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) | 240 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) |
@@ -261,181 +257,26 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) | |||
261 | 257 | ||
262 | list_del(&gpuobj->list); | 258 | list_del(&gpuobj->list); |
263 | 259 | ||
264 | *pgpuobj = NULL; | ||
265 | kfree(gpuobj); | 260 | kfree(gpuobj); |
266 | return 0; | 261 | return 0; |
267 | } | 262 | } |
268 | 263 | ||
269 | static int | 264 | void |
270 | nouveau_gpuobj_instance_get(struct drm_device *dev, | 265 | nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr) |
271 | struct nouveau_channel *chan, | ||
272 | struct nouveau_gpuobj *gpuobj, uint32_t *inst) | ||
273 | { | ||
274 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
275 | struct nouveau_gpuobj *cpramin; | ||
276 | |||
277 | /* <NV50 use PRAMIN address everywhere */ | ||
278 | if (dev_priv->card_type < NV_50) { | ||
279 | *inst = gpuobj->im_pramin->start; | ||
280 | if (gpuobj->im_channel) { | ||
281 | cpramin = gpuobj->im_channel->ramin->gpuobj; | ||
282 | *inst += cpramin->im_pramin->start; | ||
283 | } | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | /* NV50 channel-local instance */ | ||
288 | if (chan) { | ||
289 | *inst = gpuobj->im_pramin->start; | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | /* NV50 global (VRAM) instance */ | ||
294 | if (!gpuobj->im_channel) { | ||
295 | /* ...from global heap */ | ||
296 | if (!gpuobj->im_backing) { | ||
297 | NV_ERROR(dev, "AII, no VRAM backing gpuobj\n"); | ||
298 | return -EINVAL; | ||
299 | } | ||
300 | *inst = gpuobj->im_backing_start; | ||
301 | return 0; | ||
302 | } else { | ||
303 | /* ...from local heap */ | ||
304 | cpramin = gpuobj->im_channel->ramin->gpuobj; | ||
305 | *inst = cpramin->im_backing_start + gpuobj->im_pramin->start; | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | return -EINVAL; | ||
310 | } | ||
311 | |||
312 | int | ||
313 | nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, | ||
314 | uint32_t handle, struct nouveau_gpuobj *gpuobj, | ||
315 | struct nouveau_gpuobj_ref **ref_ret) | ||
316 | { | ||
317 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
318 | struct nouveau_gpuobj_ref *ref; | ||
319 | uint32_t instance; | ||
320 | int ret; | ||
321 | |||
322 | NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n", | ||
323 | chan ? chan->id : -1, handle, gpuobj); | ||
324 | |||
325 | if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) | ||
326 | return -EINVAL; | ||
327 | |||
328 | if (!chan && !ref_ret) | ||
329 | return -EINVAL; | ||
330 | |||
331 | if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) { | ||
332 | /* sw object */ | ||
333 | instance = 0x40; | ||
334 | } else { | ||
335 | ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); | ||
336 | if (ret) | ||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | ||
341 | if (!ref) | ||
342 | return -ENOMEM; | ||
343 | INIT_LIST_HEAD(&ref->list); | ||
344 | ref->gpuobj = gpuobj; | ||
345 | ref->channel = chan; | ||
346 | ref->instance = instance; | ||
347 | |||
348 | if (!ref_ret) { | ||
349 | ref->handle = handle; | ||
350 | |||
351 | ret = nouveau_ramht_insert(dev, ref); | ||
352 | if (ret) { | ||
353 | kfree(ref); | ||
354 | return ret; | ||
355 | } | ||
356 | } else { | ||
357 | ref->handle = ~0; | ||
358 | *ref_ret = ref; | ||
359 | } | ||
360 | |||
361 | ref->gpuobj->refcount++; | ||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) | ||
366 | { | ||
367 | struct nouveau_gpuobj_ref *ref; | ||
368 | |||
369 | NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL); | ||
370 | |||
371 | if (!dev || !pref || *pref == NULL) | ||
372 | return -EINVAL; | ||
373 | ref = *pref; | ||
374 | |||
375 | if (ref->handle != ~0) | ||
376 | nouveau_ramht_remove(dev, ref); | ||
377 | |||
378 | if (ref->gpuobj) { | ||
379 | ref->gpuobj->refcount--; | ||
380 | |||
381 | if (ref->gpuobj->refcount == 0) { | ||
382 | if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) | ||
383 | nouveau_gpuobj_del(dev, &ref->gpuobj); | ||
384 | } | ||
385 | } | ||
386 | |||
387 | *pref = NULL; | ||
388 | kfree(ref); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | int | ||
393 | nouveau_gpuobj_new_ref(struct drm_device *dev, | ||
394 | struct nouveau_channel *oc, struct nouveau_channel *rc, | ||
395 | uint32_t handle, uint32_t size, int align, | ||
396 | uint32_t flags, struct nouveau_gpuobj_ref **ref) | ||
397 | { | 266 | { |
398 | struct nouveau_gpuobj *gpuobj = NULL; | 267 | if (ref) |
399 | int ret; | 268 | ref->refcount++; |
400 | 269 | ||
401 | ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj); | 270 | if (*ptr && --(*ptr)->refcount == 0) |
402 | if (ret) | 271 | nouveau_gpuobj_del(*ptr); |
403 | return ret; | ||
404 | 272 | ||
405 | ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref); | 273 | *ptr = ref; |
406 | if (ret) { | ||
407 | nouveau_gpuobj_del(dev, &gpuobj); | ||
408 | return ret; | ||
409 | } | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | int | ||
415 | nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, | ||
416 | struct nouveau_gpuobj_ref **ref_ret) | ||
417 | { | ||
418 | struct nouveau_gpuobj_ref *ref; | ||
419 | struct list_head *entry, *tmp; | ||
420 | |||
421 | list_for_each_safe(entry, tmp, &chan->ramht_refs) { | ||
422 | ref = list_entry(entry, struct nouveau_gpuobj_ref, list); | ||
423 | |||
424 | if (ref->handle == handle) { | ||
425 | if (ref_ret) | ||
426 | *ref_ret = ref; | ||
427 | return 0; | ||
428 | } | ||
429 | } | ||
430 | |||
431 | return -EINVAL; | ||
432 | } | 274 | } |
433 | 275 | ||
434 | int | 276 | int |
435 | nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | 277 | nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, |
436 | uint32_t b_offset, uint32_t size, | 278 | uint32_t b_offset, uint32_t size, |
437 | uint32_t flags, struct nouveau_gpuobj **pgpuobj, | 279 | uint32_t flags, struct nouveau_gpuobj **pgpuobj) |
438 | struct nouveau_gpuobj_ref **pref) | ||
439 | { | 280 | { |
440 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 281 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
441 | struct nouveau_gpuobj *gpuobj = NULL; | 282 | struct nouveau_gpuobj *gpuobj = NULL; |
@@ -450,8 +291,8 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | |||
450 | return -ENOMEM; | 291 | return -ENOMEM; |
451 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | 292 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); |
452 | gpuobj->dev = dev; | 293 | gpuobj->dev = dev; |
453 | gpuobj->im_channel = NULL; | ||
454 | gpuobj->flags = flags | NVOBJ_FLAG_FAKE; | 294 | gpuobj->flags = flags | NVOBJ_FLAG_FAKE; |
295 | gpuobj->refcount = 1; | ||
455 | 296 | ||
456 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | 297 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); |
457 | 298 | ||
@@ -459,7 +300,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | |||
459 | gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), | 300 | gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), |
460 | GFP_KERNEL); | 301 | GFP_KERNEL); |
461 | if (!gpuobj->im_pramin) { | 302 | if (!gpuobj->im_pramin) { |
462 | nouveau_gpuobj_del(dev, &gpuobj); | 303 | nouveau_gpuobj_ref(NULL, &gpuobj); |
463 | return -ENOMEM; | 304 | return -ENOMEM; |
464 | } | 305 | } |
465 | gpuobj->im_pramin->start = p_offset; | 306 | gpuobj->im_pramin->start = p_offset; |
@@ -481,14 +322,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | |||
481 | dev_priv->engine.instmem.flush(dev); | 322 | dev_priv->engine.instmem.flush(dev); |
482 | } | 323 | } |
483 | 324 | ||
484 | if (pref) { | ||
485 | i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref); | ||
486 | if (i) { | ||
487 | nouveau_gpuobj_del(dev, &gpuobj); | ||
488 | return i; | ||
489 | } | ||
490 | } | ||
491 | |||
492 | if (pgpuobj) | 325 | if (pgpuobj) |
493 | *pgpuobj = gpuobj; | 326 | *pgpuobj = gpuobj; |
494 | return 0; | 327 | return 0; |
@@ -628,7 +461,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, | |||
628 | *o_ret = 0; | 461 | *o_ret = 0; |
629 | } else | 462 | } else |
630 | if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { | 463 | if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { |
631 | *gpuobj = dev_priv->gart_info.sg_ctxdma; | 464 | nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); |
632 | if (offset & ~0xffffffffULL) { | 465 | if (offset & ~0xffffffffULL) { |
633 | NV_ERROR(dev, "obj offset exceeds 32-bits\n"); | 466 | NV_ERROR(dev, "obj offset exceeds 32-bits\n"); |
634 | return -EINVAL; | 467 | return -EINVAL; |
@@ -760,8 +593,11 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | |||
760 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | 593 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); |
761 | if (!gpuobj) | 594 | if (!gpuobj) |
762 | return -ENOMEM; | 595 | return -ENOMEM; |
596 | gpuobj->dev = chan->dev; | ||
763 | gpuobj->engine = NVOBJ_ENGINE_SW; | 597 | gpuobj->engine = NVOBJ_ENGINE_SW; |
764 | gpuobj->class = class; | 598 | gpuobj->class = class; |
599 | gpuobj->refcount = 1; | ||
600 | gpuobj->cinst = 0x40; | ||
765 | 601 | ||
766 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | 602 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); |
767 | *gpuobj_ret = gpuobj; | 603 | *gpuobj_ret = gpuobj; |
@@ -773,7 +609,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | |||
773 | { | 609 | { |
774 | struct drm_device *dev = chan->dev; | 610 | struct drm_device *dev = chan->dev; |
775 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 611 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
776 | struct nouveau_gpuobj *pramin = NULL; | ||
777 | uint32_t size; | 612 | uint32_t size; |
778 | uint32_t base; | 613 | uint32_t base; |
779 | int ret; | 614 | int ret; |
@@ -798,18 +633,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | |||
798 | size += 0x1000; | 633 | size += 0x1000; |
799 | } | 634 | } |
800 | 635 | ||
801 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, | 636 | ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); |
802 | &chan->ramin); | ||
803 | if (ret) { | 637 | if (ret) { |
804 | NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); | 638 | NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); |
805 | return ret; | 639 | return ret; |
806 | } | 640 | } |
807 | pramin = chan->ramin->gpuobj; | ||
808 | 641 | ||
809 | ret = drm_mm_init(&chan->ramin_heap, base, size); | 642 | ret = drm_mm_init(&chan->ramin_heap, base, size); |
810 | if (ret) { | 643 | if (ret) { |
811 | NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); | 644 | NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); |
812 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | 645 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
813 | return ret; | 646 | return ret; |
814 | } | 647 | } |
815 | 648 | ||
@@ -826,8 +659,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
826 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; | 659 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; |
827 | int ret, i; | 660 | int ret, i; |
828 | 661 | ||
829 | INIT_LIST_HEAD(&chan->ramht_refs); | ||
830 | |||
831 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); | 662 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); |
832 | 663 | ||
833 | /* Allocate a chunk of memory for per-channel object storage */ | 664 | /* Allocate a chunk of memory for per-channel object storage */ |
@@ -846,10 +677,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
846 | uint32_t vm_offset, pde; | 677 | uint32_t vm_offset, pde; |
847 | 678 | ||
848 | vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; | 679 | vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; |
849 | vm_offset += chan->ramin->gpuobj->im_pramin->start; | 680 | vm_offset += chan->ramin->im_pramin->start; |
850 | 681 | ||
851 | ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, | 682 | ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, |
852 | 0, &chan->vm_pd, NULL); | 683 | 0, &chan->vm_pd); |
853 | if (ret) | 684 | if (ret) |
854 | return ret; | 685 | return ret; |
855 | for (i = 0; i < 0x4000; i += 8) { | 686 | for (i = 0; i < 0x4000; i += 8) { |
@@ -857,25 +688,19 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
857 | nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); | 688 | nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); |
858 | } | 689 | } |
859 | 690 | ||
691 | nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, | ||
692 | &chan->vm_gart_pt); | ||
860 | pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; | 693 | pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; |
861 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, | 694 | nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3); |
862 | dev_priv->gart_info.sg_ctxdma, | ||
863 | &chan->vm_gart_pt); | ||
864 | if (ret) | ||
865 | return ret; | ||
866 | nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->instance | 3); | ||
867 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); | 695 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); |
868 | 696 | ||
869 | pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; | 697 | pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; |
870 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { | 698 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { |
871 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, | 699 | nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i], |
872 | dev_priv->vm_vram_pt[i], | 700 | &chan->vm_vram_pt[i]); |
873 | &chan->vm_vram_pt[i]); | ||
874 | if (ret) | ||
875 | return ret; | ||
876 | 701 | ||
877 | nv_wo32(chan->vm_pd, pde + 0, | 702 | nv_wo32(chan->vm_pd, pde + 0, |
878 | chan->vm_vram_pt[i]->instance | 0x61); | 703 | chan->vm_vram_pt[i]->vinst | 0x61); |
879 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); | 704 | nv_wo32(chan->vm_pd, pde + 4, 0x00000000); |
880 | pde += 8; | 705 | pde += 8; |
881 | } | 706 | } |
@@ -885,15 +710,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
885 | 710 | ||
886 | /* RAMHT */ | 711 | /* RAMHT */ |
887 | if (dev_priv->card_type < NV_50) { | 712 | if (dev_priv->card_type < NV_50) { |
888 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, | 713 | nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL); |
889 | &chan->ramht); | 714 | } else { |
715 | struct nouveau_gpuobj *ramht = NULL; | ||
716 | |||
717 | ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16, | ||
718 | NVOBJ_FLAG_ZERO_ALLOC, &ramht); | ||
890 | if (ret) | 719 | if (ret) |
891 | return ret; | 720 | return ret; |
892 | } else { | 721 | |
893 | ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, | 722 | ret = nouveau_ramht_new(dev, ramht, &chan->ramht); |
894 | 0x8000, 16, | 723 | nouveau_gpuobj_ref(NULL, &ramht); |
895 | NVOBJ_FLAG_ZERO_ALLOC, | ||
896 | &chan->ramht); | ||
897 | if (ret) | 724 | if (ret) |
898 | return ret; | 725 | return ret; |
899 | } | 726 | } |
@@ -910,24 +737,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
910 | } | 737 | } |
911 | } else { | 738 | } else { |
912 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | 739 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
913 | 0, dev_priv->fb_available_size, | 740 | 0, dev_priv->fb_available_size, |
914 | NV_DMA_ACCESS_RW, | 741 | NV_DMA_ACCESS_RW, |
915 | NV_DMA_TARGET_VIDMEM, &vram); | 742 | NV_DMA_TARGET_VIDMEM, &vram); |
916 | if (ret) { | 743 | if (ret) { |
917 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | 744 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); |
918 | return ret; | 745 | return ret; |
919 | } | 746 | } |
920 | } | 747 | } |
921 | 748 | ||
922 | ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL); | 749 | ret = nouveau_ramht_insert(chan, vram_h, vram); |
750 | nouveau_gpuobj_ref(NULL, &vram); | ||
923 | if (ret) { | 751 | if (ret) { |
924 | NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret); | 752 | NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret); |
925 | return ret; | 753 | return ret; |
926 | } | 754 | } |
927 | 755 | ||
928 | /* TT memory ctxdma */ | 756 | /* TT memory ctxdma */ |
929 | if (dev_priv->card_type >= NV_50) { | 757 | if (dev_priv->card_type >= NV_50) { |
930 | tt = vram; | 758 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, |
759 | 0, dev_priv->vm_end, | ||
760 | NV_DMA_ACCESS_RW, | ||
761 | NV_DMA_TARGET_AGP, &tt); | ||
762 | if (ret) { | ||
763 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | ||
764 | return ret; | ||
765 | } | ||
931 | } else | 766 | } else |
932 | if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { | 767 | if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { |
933 | ret = nouveau_gpuobj_gart_dma_new(chan, 0, | 768 | ret = nouveau_gpuobj_gart_dma_new(chan, 0, |
@@ -943,9 +778,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |||
943 | return ret; | 778 | return ret; |
944 | } | 779 | } |
945 | 780 | ||
946 | ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); | 781 | ret = nouveau_ramht_insert(chan, tt_h, tt); |
782 | nouveau_gpuobj_ref(NULL, &tt); | ||
947 | if (ret) { | 783 | if (ret) { |
948 | NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret); | 784 | NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret); |
949 | return ret; | 785 | return ret; |
950 | } | 786 | } |
951 | 787 | ||
@@ -957,33 +793,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | |||
957 | { | 793 | { |
958 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | 794 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; |
959 | struct drm_device *dev = chan->dev; | 795 | struct drm_device *dev = chan->dev; |
960 | struct list_head *entry, *tmp; | ||
961 | struct nouveau_gpuobj_ref *ref; | ||
962 | int i; | 796 | int i; |
963 | 797 | ||
964 | NV_DEBUG(dev, "ch%d\n", chan->id); | 798 | NV_DEBUG(dev, "ch%d\n", chan->id); |
965 | 799 | ||
966 | if (!chan->ramht_refs.next) | 800 | if (!chan->ramht) |
967 | return; | 801 | return; |
968 | 802 | ||
969 | list_for_each_safe(entry, tmp, &chan->ramht_refs) { | 803 | nouveau_ramht_ref(NULL, &chan->ramht, chan); |
970 | ref = list_entry(entry, struct nouveau_gpuobj_ref, list); | ||
971 | |||
972 | nouveau_gpuobj_ref_del(dev, &ref); | ||
973 | } | ||
974 | |||
975 | nouveau_gpuobj_ref_del(dev, &chan->ramht); | ||
976 | 804 | ||
977 | nouveau_gpuobj_del(dev, &chan->vm_pd); | 805 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); |
978 | nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); | 806 | nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); |
979 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) | 807 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) |
980 | nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); | 808 | nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); |
981 | 809 | ||
982 | if (chan->ramin_heap.free_stack.next) | 810 | if (chan->ramin_heap.free_stack.next) |
983 | drm_mm_takedown(&chan->ramin_heap); | 811 | drm_mm_takedown(&chan->ramin_heap); |
984 | if (chan->ramin) | 812 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
985 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | ||
986 | |||
987 | } | 813 | } |
988 | 814 | ||
989 | int | 815 | int |
@@ -1095,25 +921,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, | |||
1095 | return -EPERM; | 921 | return -EPERM; |
1096 | } | 922 | } |
1097 | 923 | ||
1098 | if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) | 924 | if (nouveau_ramht_find(chan, init->handle)) |
1099 | return -EEXIST; | 925 | return -EEXIST; |
1100 | 926 | ||
1101 | if (!grc->software) | 927 | if (!grc->software) |
1102 | ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); | 928 | ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); |
1103 | else | 929 | else |
1104 | ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); | 930 | ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); |
1105 | |||
1106 | if (ret) { | 931 | if (ret) { |
1107 | NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", | 932 | NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", |
1108 | ret, init->channel, init->handle); | 933 | ret, init->channel, init->handle); |
1109 | return ret; | 934 | return ret; |
1110 | } | 935 | } |
1111 | 936 | ||
1112 | ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL); | 937 | ret = nouveau_ramht_insert(chan, init->handle, gr); |
938 | nouveau_gpuobj_ref(NULL, &gr); | ||
1113 | if (ret) { | 939 | if (ret) { |
1114 | NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", | 940 | NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", |
1115 | ret, init->channel, init->handle); | 941 | ret, init->channel, init->handle); |
1116 | nouveau_gpuobj_del(dev, &gr); | ||
1117 | return ret; | 942 | return ret; |
1118 | } | 943 | } |
1119 | 944 | ||
@@ -1124,17 +949,16 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, | |||
1124 | struct drm_file *file_priv) | 949 | struct drm_file *file_priv) |
1125 | { | 950 | { |
1126 | struct drm_nouveau_gpuobj_free *objfree = data; | 951 | struct drm_nouveau_gpuobj_free *objfree = data; |
1127 | struct nouveau_gpuobj_ref *ref; | 952 | struct nouveau_gpuobj *gpuobj; |
1128 | struct nouveau_channel *chan; | 953 | struct nouveau_channel *chan; |
1129 | int ret; | ||
1130 | 954 | ||
1131 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); | 955 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); |
1132 | 956 | ||
1133 | ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); | 957 | gpuobj = nouveau_ramht_find(chan, objfree->handle); |
1134 | if (ret) | 958 | if (!gpuobj) |
1135 | return ret; | 959 | return -ENOENT; |
1136 | nouveau_gpuobj_ref_del(dev, &ref); | ||
1137 | 960 | ||
961 | nouveau_ramht_remove(chan, objfree->handle); | ||
1138 | return 0; | 962 | return 0; |
1139 | } | 963 | } |
1140 | 964 | ||