Diffstat:
 drivers/gpu/drm/nouveau/nouveau_object.c | 105 ++++++++++-----------------------
 1 file changed, 31 insertions(+), 74 deletions(-)
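In brief, judging from the hunks below, this diff makes three related changes to nouveau_object.c: PRAMIN suballocation switches from nouveau's private mem_block allocator (nouveau_mem_alloc_block()/nouveau_mem_free_block()) to the core drm_mm range manager; the instmem prepare_access()/finish_access() bracketing around nv_wo32()/nv_ro32() accesses is replaced by a single flush() hook called after writes; and per-channel PRAMIN reservation becomes unconditional (previously NV50-only), sizing the PGRAPH context from engine.graph.grctx_size instead of a hard-coded 0x70000. The two ioctl hunks at the end also drop the NOUVEAU_CHECK_INITIALISED_WITH_RETURN guard.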
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index e7c100ba63a1..b6bcb254f4ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		}
 	}
 
-	instmem->prepare_access(dev, true);
 	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
 	do {
 		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
@@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			nv_wo32(dev, ramht, (co + 4)/4, ctx);
 
 			list_add_tail(&ref->list, &chan->ramht_refs);
-			instmem->finish_access(dev);
+			instmem->flush(dev);
 			return 0;
 		}
 		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
@@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		if (co >= dev_priv->ramht_size)
 			co = 0;
 	} while (co != ho);
-	instmem->finish_access(dev);
 
 	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
 	return -ENOMEM;
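The access-bracketing change repeated throughout this file follows one schematic pattern; a minimal before/after sketch using only the hook names visible in the diff (not code lifted from the patch itself):

	/* before: bracket every PRAMIN access; true requests write access */
	instmem->prepare_access(dev, true);
	nv_wo32(dev, obj, offset/4, value);
	instmem->finish_access(dev);

	/* after: write directly, then flush the instmem caches once */
	nv_wo32(dev, obj, offset/4, value);
	instmem->flush(dev);

Read-only paths (prepare_access(dev, false), as in nouveau_gpuobj_suspend() further down) simply lose their bracketing, with no flush() needed afterwards.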
@@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		return;
 	}
 
-	instmem->prepare_access(dev, true);
 	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
 	do {
 		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
 
 			list_del(&ref->list);
-			instmem->finish_access(dev);
+			instmem->flush(dev);
 			return;
 		}
 
@@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			co = 0;
 	} while (co != ho);
 	list_del(&ref->list);
-	instmem->finish_access(dev);
 
 	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
 		 chan->id, ref->handle);
@@ -209,7 +205,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
 	struct nouveau_gpuobj *gpuobj;
-	struct mem_block *pramin = NULL;
+	struct drm_mm *pramin = NULL;
 	int ret;
 
 	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
@@ -233,25 +229,12 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	 * available.
 	 */
 	if (chan) {
-		if (chan->ramin_heap) {
-			NV_DEBUG(dev, "private heap\n");
-			pramin = chan->ramin_heap;
-		} else
-		if (dev_priv->card_type < NV_50) {
-			NV_DEBUG(dev, "global heap fallback\n");
-			pramin = dev_priv->ramin_heap;
-		}
+		NV_DEBUG(dev, "channel heap\n");
+		pramin = &chan->ramin_heap;
 	} else {
 		NV_DEBUG(dev, "global heap\n");
-		pramin = dev_priv->ramin_heap;
-	}
-
-	if (!pramin) {
-		NV_ERROR(dev, "No PRAMIN heap!\n");
-		return -EINVAL;
-	}
+		pramin = &dev_priv->ramin_heap;
 
-	if (!chan) {
 		ret = engine->instmem.populate(dev, gpuobj, &size);
 		if (ret) {
 			nouveau_gpuobj_del(dev, &gpuobj);
@@ -260,9 +243,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	}
 
 	/* Allocate a chunk of the PRAMIN aperture */
-	gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
-						    drm_order(align),
-						    (struct drm_file *)-2, 0);
+	gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
+	if (gpuobj->im_pramin)
+		gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);
+
 	if (!gpuobj->im_pramin) {
 		nouveau_gpuobj_del(dev, &gpuobj);
 		return -ENOMEM;
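For context on the hunk above: the drm_mm API of this era allocates in two steps, which is why the new code checks im_pramin twice. A sketch of the pattern (the wrapper function is hypothetical, not part of the patch):

	/* Hypothetical helper showing the two-step drm_mm allocation.
	 * drm_mm_search_free() only locates a hole of at least `size` bytes
	 * at `align` alignment; drm_mm_get_block() then splits a node out of
	 * that hole and can itself fail, hence the second NULL check. */
	static struct drm_mm_node *
	pramin_alloc(struct drm_mm *heap, unsigned long size, unsigned align)
	{
		struct drm_mm_node *hole;

		hole = drm_mm_search_free(heap, size, align, 0);
		if (!hole)
			return NULL;	/* no free range large enough */

		return drm_mm_get_block(hole, size, align);
	}

Note that drm_mm takes the alignment in bytes directly, where the old nouveau_mem_alloc_block() took drm_order(align), a log2 order.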
@@ -279,10 +263,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
 		int i;
 
-		engine->instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		engine->instmem.finish_access(dev);
+		engine->instmem.flush(dev);
 	}
 
 	*gpuobj_ret = gpuobj;
@@ -370,10 +353,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
 	}
 
 	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
-		engine->instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		engine->instmem.finish_access(dev);
+		engine->instmem.flush(dev);
 	}
 
 	if (gpuobj->dtor)
@@ -386,7 +368,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
 		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
 			kfree(gpuobj->im_pramin);
 		else
-			nouveau_mem_free_block(gpuobj->im_pramin);
+			drm_mm_put_block(gpuobj->im_pramin);
 	}
 
 	list_del(&gpuobj->list);
@@ -589,7 +571,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
 	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
 
 	if (p_offset != ~0) {
-		gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
+		gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
 					    GFP_KERNEL);
 		if (!gpuobj->im_pramin) {
 			nouveau_gpuobj_del(dev, &gpuobj);
@@ -605,10 +587,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
 	}
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		dev_priv->engine.instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		dev_priv->engine.instmem.finish_access(dev);
+		dev_priv->engine.instmem.flush(dev);
 	}
 
 	if (pref) {
@@ -696,8 +677,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
 		return ret;
 	}
 
-	instmem->prepare_access(dev, true);
-
 	if (dev_priv->card_type < NV_50) {
 		uint32_t frame, adjust, pte_flags = 0;
 
@@ -734,7 +713,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
 		nv_wo32(dev, *gpuobj, 5, flags5);
 	}
 
-	instmem->finish_access(dev);
+	instmem->flush(dev);
 
 	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
 	(*gpuobj)->class = class;
@@ -849,7 +828,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 		return ret;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (dev_priv->card_type >= NV_50) {
 		nv_wo32(dev, *gpuobj, 0, class);
 		nv_wo32(dev, *gpuobj, 5, 0x00010000);
@@ -874,7 +852,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 			}
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
 	(*gpuobj)->class = class;
@@ -920,6 +898,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 	base = 0;
 
 	/* PGRAPH context */
+	size += dev_priv->engine.graph.grctx_size;
 
 	if (dev_priv->card_type == NV_50) {
 		/* Various fixed table thingos */
@@ -930,12 +909,8 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 		size += 0x8000;
 		/* RAMFC */
 		size += 0x1000;
-		/* PGRAPH context */
-		size += 0x70000;
 	}
 
-	NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
-		 chan->id, size, base);
 	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
 				     &chan->ramin);
 	if (ret) {
@@ -944,8 +919,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 	}
 	pramin = chan->ramin->gpuobj;
 
-	ret = nouveau_mem_init_heap(&chan->ramin_heap,
-				    pramin->im_pramin->start + base, size);
+	ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
 	if (ret) {
 		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
 		nouveau_gpuobj_ref_del(dev, &chan->ramin);
@@ -969,15 +943,11 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 
 	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
 
-	/* Reserve a block of PRAMIN for the channel
-	 *XXX: maybe on <NV50 too at some point
-	 */
-	if (0 || dev_priv->card_type == NV_50) {
-		ret = nouveau_gpuobj_channel_init_pramin(chan);
-		if (ret) {
-			NV_ERROR(dev, "init pramin\n");
-			return ret;
-		}
+	/* Allocate a chunk of memory for per-channel object storage */
+	ret = nouveau_gpuobj_channel_init_pramin(chan);
+	if (ret) {
+		NV_ERROR(dev, "init pramin\n");
+		return ret;
 	}
 
 	/* NV50 VM
@@ -988,17 +958,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	if (dev_priv->card_type >= NV_50) {
 		uint32_t vm_offset, pde;
 
-		instmem->prepare_access(dev, true);
-
 		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
 		vm_offset += chan->ramin->gpuobj->im_pramin->start;
 
 		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
 					      0, &chan->vm_pd, NULL);
-		if (ret) {
-			instmem->finish_access(dev);
+		if (ret)
 			return ret;
-		}
 		for (i = 0; i < 0x4000; i += 8) {
 			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
 			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
@@ -1008,10 +974,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
 					     dev_priv->gart_info.sg_ctxdma,
 					     &chan->vm_gart_pt);
-		if (ret) {
-			instmem->finish_access(dev);
+		if (ret)
 			return ret;
-		}
 		nv_wo32(dev, chan->vm_pd, pde++,
 			chan->vm_gart_pt->instance | 0x03);
 		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
@@ -1021,17 +985,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
 						     dev_priv->vm_vram_pt[i],
 						     &chan->vm_vram_pt[i]);
-			if (ret) {
-				instmem->finish_access(dev);
+			if (ret)
 				return ret;
-			}
 
 			nv_wo32(dev, chan->vm_pd, pde++,
 				chan->vm_vram_pt[i]->instance | 0x61);
 			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
 		}
 
-		instmem->finish_access(dev);
+		instmem->flush(dev);
 	}
 
 	/* RAMHT */
@@ -1130,8 +1092,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
 		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
 
-	if (chan->ramin_heap)
-		nouveau_mem_takedown(&chan->ramin_heap);
+	if (chan->ramin_heap.free_stack.next)
+		drm_mm_takedown(&chan->ramin_heap);
 	if (chan->ramin)
 		nouveau_gpuobj_ref_del(dev, &chan->ramin);
 
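With the heap embedded instead of pointed-to, there is no pointer left to NULL-check at takedown, hence the test on ramin_heap.free_stack.next above: the channel structure is allocated zeroed, and free_stack is only linked into itself once drm_mm_init() has run, so a non-NULL .next marks an initialized heap. This leans on a drm_mm internal of the period rather than a public predicate.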
@@ -1164,10 +1126,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
 			return -ENOMEM;
 		}
 
-		dev_priv->engine.instmem.prepare_access(dev, false);
 		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
 			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
-		dev_priv->engine.instmem.finish_access(dev);
 	}
 
 	return 0;
@@ -1212,10 +1172,9 @@ nouveau_gpuobj_resume(struct drm_device *dev)
 		if (!gpuobj->im_backing_suspend)
 			continue;
 
-		dev_priv->engine.instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
 			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
-		dev_priv->engine.instmem.finish_access(dev);
+		dev_priv->engine.instmem.flush(dev);
 	}
 
 	nouveau_gpuobj_suspend_cleanup(dev);
@@ -1232,7 +1191,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
 
 	if (init->handle == ~0)
@@ -1283,7 +1241,6 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
 
 	ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);