about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/nouveau/nouveau_object.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_object.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c  45
1 file changed, 12 insertions, 33 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 5624f37e4c75..7d86e05ac883 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
132 } 132 }
133 } 133 }
134 134
135 instmem->prepare_access(dev, true);
136 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); 135 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
137 do { 136 do {
138 if (!nouveau_ramht_entry_valid(dev, ramht, co)) { 137 if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
@@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
143 nv_wo32(dev, ramht, (co + 4)/4, ctx); 142 nv_wo32(dev, ramht, (co + 4)/4, ctx);
144 143
145 list_add_tail(&ref->list, &chan->ramht_refs); 144 list_add_tail(&ref->list, &chan->ramht_refs);
146 instmem->finish_access(dev); 145 instmem->flush(dev);
147 return 0; 146 return 0;
148 } 147 }
149 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", 148 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
@@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
153 if (co >= dev_priv->ramht_size) 152 if (co >= dev_priv->ramht_size)
154 co = 0; 153 co = 0;
155 } while (co != ho); 154 } while (co != ho);
156 instmem->finish_access(dev);
157 155
158 NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); 156 NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
159 return -ENOMEM; 157 return -ENOMEM;
@@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
173 return; 171 return;
174 } 172 }
175 173
176 instmem->prepare_access(dev, true);
177 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); 174 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
178 do { 175 do {
179 if (nouveau_ramht_entry_valid(dev, ramht, co) && 176 if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
186 nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); 183 nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
187 184
188 list_del(&ref->list); 185 list_del(&ref->list);
189 instmem->finish_access(dev); 186 instmem->flush(dev);
190 return; 187 return;
191 } 188 }
192 189
@@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
195 co = 0; 192 co = 0;
196 } while (co != ho); 193 } while (co != ho);
197 list_del(&ref->list); 194 list_del(&ref->list);
198 instmem->finish_access(dev);
199 195
200 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", 196 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
201 chan->id, ref->handle); 197 chan->id, ref->handle);
@@ -280,10 +276,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
280 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { 276 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
281 int i; 277 int i;
282 278
283 engine->instmem.prepare_access(dev, true);
284 for (i = 0; i < gpuobj->im_pramin->size; i += 4) 279 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
285 nv_wo32(dev, gpuobj, i/4, 0); 280 nv_wo32(dev, gpuobj, i/4, 0);
286 engine->instmem.finish_access(dev); 281 engine->instmem.flush(dev);
287 } 282 }
288 283
289 *gpuobj_ret = gpuobj; 284 *gpuobj_ret = gpuobj;
@@ -371,10 +366,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
371 } 366 }
372 367
373 if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { 368 if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
374 engine->instmem.prepare_access(dev, true);
375 for (i = 0; i < gpuobj->im_pramin->size; i += 4) 369 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
376 nv_wo32(dev, gpuobj, i/4, 0); 370 nv_wo32(dev, gpuobj, i/4, 0);
377 engine->instmem.finish_access(dev); 371 engine->instmem.flush(dev);
378 } 372 }
379 373
380 if (gpuobj->dtor) 374 if (gpuobj->dtor)
@@ -606,10 +600,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
606 } 600 }
607 601
608 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { 602 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
609 dev_priv->engine.instmem.prepare_access(dev, true);
610 for (i = 0; i < gpuobj->im_pramin->size; i += 4) 603 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
611 nv_wo32(dev, gpuobj, i/4, 0); 604 nv_wo32(dev, gpuobj, i/4, 0);
612 dev_priv->engine.instmem.finish_access(dev); 605 dev_priv->engine.instmem.flush(dev);
613 } 606 }
614 607
615 if (pref) { 608 if (pref) {
@@ -697,8 +690,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
697 return ret; 690 return ret;
698 } 691 }
699 692
700 instmem->prepare_access(dev, true);
701
702 if (dev_priv->card_type < NV_50) { 693 if (dev_priv->card_type < NV_50) {
703 uint32_t frame, adjust, pte_flags = 0; 694 uint32_t frame, adjust, pte_flags = 0;
704 695
@@ -735,7 +726,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
735 nv_wo32(dev, *gpuobj, 5, flags5); 726 nv_wo32(dev, *gpuobj, 5, flags5);
736 } 727 }
737 728
738 instmem->finish_access(dev); 729 instmem->flush(dev);
739 730
740 (*gpuobj)->engine = NVOBJ_ENGINE_SW; 731 (*gpuobj)->engine = NVOBJ_ENGINE_SW;
741 (*gpuobj)->class = class; 732 (*gpuobj)->class = class;
@@ -850,7 +841,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
850 return ret; 841 return ret;
851 } 842 }
852 843
853 dev_priv->engine.instmem.prepare_access(dev, true);
854 if (dev_priv->card_type >= NV_50) { 844 if (dev_priv->card_type >= NV_50) {
855 nv_wo32(dev, *gpuobj, 0, class); 845 nv_wo32(dev, *gpuobj, 0, class);
856 nv_wo32(dev, *gpuobj, 5, 0x00010000); 846 nv_wo32(dev, *gpuobj, 5, 0x00010000);
@@ -875,7 +865,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
875 } 865 }
876 } 866 }
877 } 867 }
878 dev_priv->engine.instmem.finish_access(dev); 868 dev_priv->engine.instmem.flush(dev);
879 869
880 (*gpuobj)->engine = NVOBJ_ENGINE_GR; 870 (*gpuobj)->engine = NVOBJ_ENGINE_GR;
881 (*gpuobj)->class = class; 871 (*gpuobj)->class = class;
@@ -988,17 +978,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
988 if (dev_priv->card_type >= NV_50) { 978 if (dev_priv->card_type >= NV_50) {
989 uint32_t vm_offset, pde; 979 uint32_t vm_offset, pde;
990 980
991 instmem->prepare_access(dev, true);
992
993 vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; 981 vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
994 vm_offset += chan->ramin->gpuobj->im_pramin->start; 982 vm_offset += chan->ramin->gpuobj->im_pramin->start;
995 983
996 ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, 984 ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
997 0, &chan->vm_pd, NULL); 985 0, &chan->vm_pd, NULL);
998 if (ret) { 986 if (ret)
999 instmem->finish_access(dev);
1000 return ret; 987 return ret;
1001 }
1002 for (i = 0; i < 0x4000; i += 8) { 988 for (i = 0; i < 0x4000; i += 8) {
1003 nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); 989 nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
1004 nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); 990 nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
@@ -1008,10 +994,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
1008 ret = nouveau_gpuobj_ref_add(dev, NULL, 0, 994 ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
1009 dev_priv->gart_info.sg_ctxdma, 995 dev_priv->gart_info.sg_ctxdma,
1010 &chan->vm_gart_pt); 996 &chan->vm_gart_pt);
1011 if (ret) { 997 if (ret)
1012 instmem->finish_access(dev);
1013 return ret; 998 return ret;
1014 }
1015 nv_wo32(dev, chan->vm_pd, pde++, 999 nv_wo32(dev, chan->vm_pd, pde++,
1016 chan->vm_gart_pt->instance | 0x03); 1000 chan->vm_gart_pt->instance | 0x03);
1017 nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); 1001 nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
@@ -1021,17 +1005,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
1021 ret = nouveau_gpuobj_ref_add(dev, NULL, 0, 1005 ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
1022 dev_priv->vm_vram_pt[i], 1006 dev_priv->vm_vram_pt[i],
1023 &chan->vm_vram_pt[i]); 1007 &chan->vm_vram_pt[i]);
1024 if (ret) { 1008 if (ret)
1025 instmem->finish_access(dev);
1026 return ret; 1009 return ret;
1027 }
1028 1010
1029 nv_wo32(dev, chan->vm_pd, pde++, 1011 nv_wo32(dev, chan->vm_pd, pde++,
1030 chan->vm_vram_pt[i]->instance | 0x61); 1012 chan->vm_vram_pt[i]->instance | 0x61);
1031 nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); 1013 nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
1032 } 1014 }
1033 1015
1034 instmem->finish_access(dev); 1016 instmem->flush(dev);
1035 } 1017 }
1036 1018
1037 /* RAMHT */ 1019 /* RAMHT */
@@ -1164,10 +1146,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
1164 return -ENOMEM; 1146 return -ENOMEM;
1165 } 1147 }
1166 1148
1167 dev_priv->engine.instmem.prepare_access(dev, false);
1168 for (i = 0; i < gpuobj->im_pramin->size / 4; i++) 1149 for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
1169 gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); 1150 gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
1170 dev_priv->engine.instmem.finish_access(dev);
1171 } 1151 }
1172 1152
1173 return 0; 1153 return 0;
@@ -1212,10 +1192,9 @@ nouveau_gpuobj_resume(struct drm_device *dev)
1212 if (!gpuobj->im_backing_suspend) 1192 if (!gpuobj->im_backing_suspend)
1213 continue; 1193 continue;
1214 1194
1215 dev_priv->engine.instmem.prepare_access(dev, true);
1216 for (i = 0; i < gpuobj->im_pramin->size / 4; i++) 1195 for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
1217 nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); 1196 nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
1218 dev_priv->engine.instmem.finish_access(dev); 1197 dev_priv->engine.instmem.flush(dev);
1219 } 1198 }
1220 1199
1221 nouveau_gpuobj_suspend_cleanup(dev); 1200 nouveau_gpuobj_suspend_cleanup(dev);