 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h    |  9
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c    |  6
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 45
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c  | 12
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c  | 18
 -rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c      |  8
 -rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c   |  9
 -rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c      | 10
 -rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c     |  5
 -rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c      |  8
 -rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c     |  2
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c   |  3
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c      | 15
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c     |  8
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c   | 33
 15 files changed, 39 insertions(+), 152 deletions(-)
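
What this commit does, in one line: the instmem engine's paired prepare_access()/finish_access() hooks around every PRAMIN access are replaced by a single flush() hook. Writes via nv_wo32()/nv_wi32() no longer open an access region; callers batch their writes and make them GPU-visible with one flush at the end, and reads via nv_ro32()/nv_ri32() need no call at all. A minimal before/after sketch of the calling pattern (the names are the ones in the diff; the surrounding statements are illustrative):

	/* before: every access was bracketed, with the write flag up front */
	instmem->prepare_access(dev, true);
	nv_wo32(dev, gpuobj, 0, value);
	instmem->finish_access(dev);

	/* after: write freely, then flush once so the GPU sees the writes */
	nv_wo32(dev, gpuobj, 0, value);
	instmem->flush(dev);
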
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index afebd32af203..e21eacc47290 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -269,8 +269,7 @@ struct nouveau_instmem_engine {
 	void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
 	int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
 	int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
-	void (*prepare_access)(struct drm_device *, bool write);
-	void (*finish_access)(struct drm_device *);
+	void (*flush)(struct drm_device *);
 };
 
 struct nouveau_mc_engine {
@@ -1027,8 +1026,7 @@ extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
 extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
 extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
 extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
-extern void nv04_instmem_finish_access(struct drm_device *);
+extern void nv04_instmem_flush(struct drm_device *);
 
 /* nv50_instmem.c */
 extern int nv50_instmem_init(struct drm_device *);
@@ -1040,8 +1038,7 @@ extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
 extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
 extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
 extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
-extern void nv50_instmem_finish_access(struct drm_device *);
+extern void nv50_instmem_flush(struct drm_device *);
 
 /* nv04_mc.c */
 extern int nv04_mc_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 4b42bf218f61..5152c0a7e6f7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -143,7 +143,6 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 		phys |= 0x30;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (size) {
 		unsigned offset_h = upper_32_bits(phys);
 		unsigned offset_l = lower_32_bits(phys);
@@ -175,7 +174,7 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 			}
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nv_wr32(dev, 0x100c80, 0x00050001);
 	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
@@ -218,7 +217,6 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 	virt -= dev_priv->vm_vram_base;
 	pages = (size >> 16) << 1;
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pages) {
 		pgt = dev_priv->vm_vram_pt[virt >> 29];
 		pte = (virt & 0x1ffe0000ULL) >> 15;
@@ -232,7 +230,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 		while (pte < end)
 			nv_wo32(dev, pgt, pte++, 0);
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nv_wr32(dev, 0x100c80, 0x00050001);
 	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
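
Both VM paths above end with the same two-stage ordering: flush() first makes the PTE writes in PRAMIN visible, then the 0x100c80 write-and-wait kicks what appears to be a VM/TLB flush and polls for completion. A condensed sketch of that ordering, assuming nv_wait(reg, mask, val) polls until (nv_rd32(dev, reg) & mask) == val; the helper name and error text below are hypothetical:

	static void nv50_vm_kick(struct drm_device *dev)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;

		/* step 1: make the preceding nv_wo32() PTE writes GPU-visible */
		dev_priv->engine.instmem.flush(dev);

		/* step 2: trigger the VM flush, wait for the busy bit to clear */
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000))
			NV_ERROR(dev, "VM flush timeout\n");
	}
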
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 5624f37e4c75..7d86e05ac883 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		}
 	}
 
-	instmem->prepare_access(dev, true);
 	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
 	do {
 		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
@@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			nv_wo32(dev, ramht, (co + 4)/4, ctx);
 
 			list_add_tail(&ref->list, &chan->ramht_refs);
-			instmem->finish_access(dev);
+			instmem->flush(dev);
 			return 0;
 		}
 		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
@@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		if (co >= dev_priv->ramht_size)
 			co = 0;
 	} while (co != ho);
-	instmem->finish_access(dev);
 
 	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
 	return -ENOMEM;
@@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 		return;
 	}
 
-	instmem->prepare_access(dev, true);
 	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
 	do {
 		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
 
 			list_del(&ref->list);
-			instmem->finish_access(dev);
+			instmem->flush(dev);
 			return;
 		}
 
@@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
 			co = 0;
 	} while (co != ho);
 	list_del(&ref->list);
-	instmem->finish_access(dev);
 
 	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
 		 chan->id, ref->handle);
@@ -280,10 +276,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
 		int i;
 
-		engine->instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		engine->instmem.finish_access(dev);
+		engine->instmem.flush(dev);
 	}
 
 	*gpuobj_ret = gpuobj;
@@ -371,10 +366,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
 	}
 
 	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
-		engine->instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		engine->instmem.finish_access(dev);
+		engine->instmem.flush(dev);
 	}
 
 	if (gpuobj->dtor)
@@ -606,10 +600,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
 	}
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		dev_priv->engine.instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
 			nv_wo32(dev, gpuobj, i/4, 0);
-		dev_priv->engine.instmem.finish_access(dev);
+		dev_priv->engine.instmem.flush(dev);
 	}
 
 	if (pref) {
@@ -697,8 +690,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
 		return ret;
 	}
 
-	instmem->prepare_access(dev, true);
-
 	if (dev_priv->card_type < NV_50) {
 		uint32_t frame, adjust, pte_flags = 0;
 
@@ -735,7 +726,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
 		nv_wo32(dev, *gpuobj, 5, flags5);
 	}
 
-	instmem->finish_access(dev);
+	instmem->flush(dev);
 
 	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
 	(*gpuobj)->class = class;
@@ -850,7 +841,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 		return ret;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (dev_priv->card_type >= NV_50) {
 		nv_wo32(dev, *gpuobj, 0, class);
 		nv_wo32(dev, *gpuobj, 5, 0x00010000);
@@ -875,7 +865,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 			}
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
 	(*gpuobj)->class = class;
@@ -988,17 +978,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	if (dev_priv->card_type >= NV_50) {
 		uint32_t vm_offset, pde;
 
-		instmem->prepare_access(dev, true);
-
 		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
 		vm_offset += chan->ramin->gpuobj->im_pramin->start;
 
 		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
 					      0, &chan->vm_pd, NULL);
-		if (ret) {
-			instmem->finish_access(dev);
+		if (ret)
 			return ret;
-		}
 		for (i = 0; i < 0x4000; i += 8) {
 			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
 			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
@@ -1008,10 +994,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
 					     dev_priv->gart_info.sg_ctxdma,
 					     &chan->vm_gart_pt);
-		if (ret) {
-			instmem->finish_access(dev);
+		if (ret)
 			return ret;
-		}
 		nv_wo32(dev, chan->vm_pd, pde++,
 			chan->vm_gart_pt->instance | 0x03);
 		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
@@ -1021,17 +1005,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
 						     dev_priv->vm_vram_pt[i],
 						     &chan->vm_vram_pt[i]);
-			if (ret) {
-				instmem->finish_access(dev);
+			if (ret)
 				return ret;
-			}
 
 			nv_wo32(dev, chan->vm_pd, pde++,
 				chan->vm_vram_pt[i]->instance | 0x61);
 			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
 		}
 
-		instmem->finish_access(dev);
+		instmem->flush(dev);
 	}
 
 	/* RAMHT */
@@ -1164,10 +1146,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
 			return -ENOMEM;
 		}
 
-		dev_priv->engine.instmem.prepare_access(dev, false);
 		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
 			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
-		dev_priv->engine.instmem.finish_access(dev);
 	}
 
 	return 0;
@@ -1212,10 +1192,9 @@ nouveau_gpuobj_resume(struct drm_device *dev)
 		if (!gpuobj->im_backing_suspend)
 			continue;
 
-		dev_priv->engine.instmem.prepare_access(dev, true);
 		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
 			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
-		dev_priv->engine.instmem.finish_access(dev);
+		dev_priv->engine.instmem.flush(dev);
 	}
 
 	nouveau_gpuobj_suspend_cleanup(dev);
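
A detail worth noticing in nouveau_gpuobj_channel_init() above: with no access region left open across the function, the early-return error paths no longer have anything to unwind, so each braced error block collapses to a bare return. Condensed from the hunks above:

	/* before: an open region had to be closed on every exit path */
	if (ret) {
		instmem->finish_access(dev);
		return ret;
	}

	/* after: nothing to unwind until the single flush at the end */
	if (ret)
		return ret;
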
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 1d6ee8b55154..1b2ab5a714ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
 
-	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
 	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
 	nvbe->pte_start = pte;
 	for (i = 0; i < nvbe->nr_pages; i++) {
@@ -116,7 +115,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.finish_access(nvbe->dev);
+	dev_priv->engine.instmem.flush(nvbe->dev);
 
 	if (dev_priv->card_type == NV_50) {
 		nv_wr32(dev, 0x100c80, 0x00050001);
@@ -154,7 +153,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;
 
-	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
 	pte = nvbe->pte_start;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
@@ -170,7 +168,7 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.finish_access(nvbe->dev);
+	dev_priv->engine.instmem.flush(nvbe->dev);
 
 	if (dev_priv->card_type == NV_50) {
 		nv_wr32(dev, 0x100c80, 0x00050001);
@@ -272,7 +270,6 @@ nouveau_sgdma_init(struct drm_device *dev)
 	pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
 		     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (dev_priv->card_type < NV_50) {
 		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
 		 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
@@ -294,7 +291,7 @@ nouveau_sgdma_init(struct drm_device *dev)
 			nv_wo32(dev, gpuobj, (i+4)/4, 0);
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
 	dev_priv->gart_info.aper_base = 0;
@@ -325,14 +322,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	int pte;
 
 	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
 	if (dev_priv->card_type < NV_50) {
-		instmem->prepare_access(dev, false);
 		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
-		instmem->finish_access(dev);
 		return 0;
 	}
 
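
nouveau_sgdma_get_page() shows the read side of the new contract: nv_ro32() is now called bare, and the local instmem pointer that existed only for the bracketing goes away. A self-contained sketch of the same lookup under the new API (the function name here is hypothetical):

	static int sgdma_peek_pte(struct drm_device *dev, uint32_t offset,
				  uint32_t *page)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
		int pte = offset >> NV_CTXDMA_PAGE_SHIFT;

		/* a plain read: only writes need a flush under the new API */
		*page = nv_ro32(dev, gpuobj, pte + 2) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}
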
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 6fd99f10eed6..67ee32fbba9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -54,8 +54,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear = nv04_instmem_clear;
 		engine->instmem.bind = nv04_instmem_bind;
 		engine->instmem.unbind = nv04_instmem_unbind;
-		engine->instmem.prepare_access = nv04_instmem_prepare_access;
-		engine->instmem.finish_access = nv04_instmem_finish_access;
+		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv04_mc_init;
 		engine->mc.takedown = nv04_mc_takedown;
 		engine->timer.init = nv04_timer_init;
@@ -95,8 +94,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear = nv04_instmem_clear;
 		engine->instmem.bind = nv04_instmem_bind;
 		engine->instmem.unbind = nv04_instmem_unbind;
-		engine->instmem.prepare_access = nv04_instmem_prepare_access;
-		engine->instmem.finish_access = nv04_instmem_finish_access;
+		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv04_mc_init;
 		engine->mc.takedown = nv04_mc_takedown;
 		engine->timer.init = nv04_timer_init;
@@ -138,8 +136,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear = nv04_instmem_clear;
 		engine->instmem.bind = nv04_instmem_bind;
 		engine->instmem.unbind = nv04_instmem_unbind;
-		engine->instmem.prepare_access = nv04_instmem_prepare_access;
-		engine->instmem.finish_access = nv04_instmem_finish_access;
+		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv04_mc_init;
 		engine->mc.takedown = nv04_mc_takedown;
 		engine->timer.init = nv04_timer_init;
@@ -181,8 +178,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear = nv04_instmem_clear;
 		engine->instmem.bind = nv04_instmem_bind;
 		engine->instmem.unbind = nv04_instmem_unbind;
-		engine->instmem.prepare_access = nv04_instmem_prepare_access;
-		engine->instmem.finish_access = nv04_instmem_finish_access;
+		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv04_mc_init;
 		engine->mc.takedown = nv04_mc_takedown;
 		engine->timer.init = nv04_timer_init;
@@ -225,8 +221,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear = nv04_instmem_clear;
 		engine->instmem.bind = nv04_instmem_bind;
 		engine->instmem.unbind = nv04_instmem_unbind;
-		engine->instmem.prepare_access = nv04_instmem_prepare_access;
-		engine->instmem.finish_access = nv04_instmem_finish_access;
+		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv40_mc_init;
 		engine->mc.takedown = nv40_mc_takedown;
 		engine->timer.init = nv04_timer_init;
@@ -271,8 +266,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.clear = nv50_instmem_clear;
 		engine->instmem.bind = nv50_instmem_bind;
 		engine->instmem.unbind = nv50_instmem_unbind;
-		engine->instmem.prepare_access = nv50_instmem_prepare_access;
-		engine->instmem.finish_access = nv50_instmem_finish_access;
+		engine->instmem.flush = nv50_instmem_flush;
 		engine->mc.init = nv50_mc_init;
 		engine->mc.takedown = nv50_mc_takedown;
 		engine->timer.init = nv04_timer_init;
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 611c83e6d9f4..b2c01fe899e9 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -137,7 +137,6 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	/* Setup initial state */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
 	RAMFC_WR(DMA_GET, chan->pushbuf_base);
 	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
@@ -145,7 +144,6 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
 			       DMA_FETCH_ENDIANNESS));
-	dev_priv->engine.instmem.finish_access(dev);
 
 	/* enable the fifo dma operation */
 	nv_wr32(dev, NV04_PFIFO_MODE,
@@ -172,8 +170,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t fc = NV04_RAMFC(chid), tmp;
 
-	dev_priv->engine.instmem.prepare_access(dev, false);
-
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
 	tmp = nv_ri32(dev, fc + 8);
@@ -184,8 +180,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid)
 	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
 	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
 
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
 }
@@ -226,7 +220,6 @@ nv04_fifo_unload_context(struct drm_device *dev)
 		return -EINVAL;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
 	RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
 	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
@@ -236,7 +229,6 @@ nv04_fifo_unload_context(struct drm_device *dev)
 	RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
 	RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
 	RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-	dev_priv->engine.instmem.finish_access(dev);
 
 	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 17af702d6ddc..4408232d33f1 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -49,10 +49,8 @@ nv04_instmem_determine_amount(struct drm_device *dev)
 	NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
 
 	/* Clear all of it, except the BIOS image that's in the first 64KiB */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
 		nv_wi32(dev, i, 0x00000000);
-	dev_priv->engine.instmem.finish_access(dev);
 }
 
 static void
@@ -186,12 +184,7 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 }
 
 void
-nv04_instmem_prepare_access(struct drm_device *dev, bool write)
-{
-}
-
-void
-nv04_instmem_finish_access(struct drm_device *dev)
+nv04_instmem_flush(struct drm_device *dev)
 {
 }
 
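
nv04_instmem_flush() is deliberately empty: on these pre-NV50 parts PRAMIN writes evidently need no explicit flush, and keeping the hook means chipset-independent code can call flush() unconditionally. A sketch of such a caller (hypothetical helper, mirroring the ZERO_ALLOC loop in nouveau_object.c):

	static void zero_gpuobj(struct drm_device *dev,
				struct nouveau_gpuobj *obj)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		int i;

		for (i = 0; i < obj->im_pramin->size; i += 4)
			nv_wo32(dev, obj, i / 4, 0);

		/* a no-op on nv04-nv40, a real PRAMIN flush on nv50 */
		dev_priv->engine.instmem.flush(dev);
	}
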
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index 7aeabf262bc0..7a4069cf5d0b 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -55,7 +55,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
 	/* Fill entries that are seen filled in dumps of nvidia driver just
 	 * after channel's is put into DMA mode
 	 */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wi32(dev, fc + 0, chan->pushbuf_base);
 	nv_wi32(dev, fc + 4, chan->pushbuf_base);
 	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
@@ -66,7 +65,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
 			  NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
 			  0);
-	dev_priv->engine.instmem.finish_access(dev);
 
 	/* enable the fifo dma operation */
 	nv_wr32(dev, NV04_PFIFO_MODE,
@@ -91,8 +89,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t fc = NV10_RAMFC(chid), tmp;
 
-	dev_priv->engine.instmem.prepare_access(dev, false);
-
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
 	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
@@ -117,8 +113,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid)
 	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
 
 out:
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
 }
@@ -155,8 +149,6 @@ nv10_fifo_unload_context(struct drm_device *dev)
 		return 0;
 	fc = NV10_RAMFC(chid);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
-
 	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
 	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
 	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
@@ -179,8 +171,6 @@ nv10_fifo_unload_context(struct drm_device *dev)
 	nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
 
 out:
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index fe2349b115f0..f3e6dd70d22e 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -421,7 +421,6 @@ nv20_graph_create_context(struct nouveau_channel *chan)
 		return ret;
 
 	/* Initialise default context values */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	ctx_init(dev, chan->ramin_grctx->gpuobj);
 
 	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
@@ -430,8 +429,6 @@ nv20_graph_create_context(struct nouveau_channel *chan)
 
 	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
 		chan->ramin_grctx->instance >> 4);
-
-	dev_priv->engine.instmem.finish_access(dev);
 	return 0;
 }
 
@@ -444,9 +441,7 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
 	if (chan->ramin_grctx)
 		nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
-	dev_priv->engine.instmem.finish_access(dev);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 500ccfd3a0b8..2b67f1835c39 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -48,7 +48,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wi32(dev, fc + 0, chan->pushbuf_base);
 	nv_wi32(dev, fc + 4, chan->pushbuf_base);
 	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
@@ -61,7 +60,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 		 0x30000000 /* no idea.. */);
 	nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
 	nv_wi32(dev, fc + 60, 0x0001FFFF);
-	dev_priv->engine.instmem.finish_access(dev);
 
 	/* enable the fifo dma operation */
 	nv_wr32(dev, NV04_PFIFO_MODE,
@@ -89,8 +87,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
 
-	dev_priv->engine.instmem.prepare_access(dev, false);
-
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
 	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
 	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
@@ -127,8 +123,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
 	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
 
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
 }
@@ -166,7 +160,6 @@ nv40_fifo_unload_context(struct drm_device *dev)
 		return 0;
 	fc = NV40_RAMFC(chid);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
 	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
 	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
@@ -200,7 +193,6 @@ nv40_fifo_unload_context(struct drm_device *dev)
 	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
 	nv_wi32(dev, fc + 72, tmp);
 #endif
-	dev_priv->engine.instmem.finish_access(dev);
 
 	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 65b13b54c5ae..2608c34eca82 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -67,7 +67,6 @@ nv40_graph_create_context(struct nouveau_channel *chan)
 		return ret;
 
 	/* Initialise default context values */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (!pgraph->ctxprog) {
 		struct nouveau_grctx ctx = {};
 
@@ -80,7 +79,6 @@ nv40_graph_create_context(struct nouveau_channel *chan)
 	}
 	nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
 		chan->ramin_grctx->gpuobj->im_pramin->start);
-	dev_priv->engine.instmem.finish_access(dev);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 711128c42de8..6a293c818b61 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -71,14 +71,13 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
 		return ret;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
 	nv_wo32(dev, obj, 1, limit);
 	nv_wo32(dev, obj, 2, offset);
 	nv_wo32(dev, obj, 3, 0x00000000);
 	nv_wo32(dev, obj, 4, 0x00000000);
 	nv_wo32(dev, obj, 5, 0x00010000);
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index e20c0e2474f3..d2d4fd0044f8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -49,12 +49,11 @@ nv50_fifo_init_thingo(struct drm_device *dev)
 	priv->cur_thingo = !priv->cur_thingo;
 
 	/* We never schedule channel 0 or 127 */
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	for (i = 1, nr = 0; i < 127; i++) {
 		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
 			nv_wo32(dev, cur->gpuobj, nr++, i);
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nv_wr32(dev, 0x32f4, cur->instance >> 12);
 	nv_wr32(dev, 0x32ec, nr);
@@ -281,8 +280,6 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
-
 	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
 	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
 	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
@@ -304,7 +301,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
 	}
 
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	ret = nv50_fifo_channel_enable(dev, chan->id, false);
 	if (ret) {
@@ -349,8 +346,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	dev_priv->engine.instmem.prepare_access(dev, false);
-
 	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
 	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
 	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
@@ -404,8 +399,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
 	}
 
-	dev_priv->engine.instmem.finish_access(dev);
-
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
 	return 0;
 }
@@ -434,8 +427,6 @@ nv50_fifo_unload_context(struct drm_device *dev)
 	ramfc = chan->ramfc->gpuobj;
 	cache = chan->cache->gpuobj;
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
-
 	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
 	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
 	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
@@ -491,7 +482,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
 		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
 	}
 
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	/*XXX: probably reload ch127 (NULL) state back too */
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
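
The FIFO paths show the batching win most clearly: nv50_fifo_unload_context() issues a long run of nv_wo32() stores into RAMFC and pays for exactly one flush at the end, where the old code held an access region open across the whole sequence. The shape of the save path, condensed (offsets as in the diff, the list abbreviated):

	/* save PFIFO state into the channel's RAMFC, then flush once */
	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
	/* ... remaining RAMFC words ... */
	dev_priv->engine.instmem.flush(dev);
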
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index b04e7c8449a5..5dc3be5696a9 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -226,7 +226,6 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	obj = chan->ramin_grctx->gpuobj;
 
 	hdr = IS_G80 ? 0x200 : 0x20;
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
 	nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
 			   pgraph->grctx_size - 1);
@@ -234,9 +233,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 	nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
 	nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
 	nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
-	dev_priv->engine.instmem.finish_access(dev);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (!pgraph->ctxprog) {
 		struct nouveau_grctx ctx = {};
 		ctx.dev = chan->dev;
@@ -247,8 +244,8 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 		nouveau_grctx_vals_load(dev, obj);
 	}
 	nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
-	dev_priv->engine.instmem.finish_access(dev);
 
+	dev_priv->engine.instmem.flush(dev);
 	return 0;
 }
 
@@ -264,10 +261,9 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
 	if (!chan->ramin || !chan->ramin->gpuobj)
 		return;
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	for (i = hdr; i < hdr + 24; i += 4)
 		nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a361d1612bd7..d9feee3b9f58 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -35,8 +35,6 @@ struct nv50_instmem_priv {
 	struct nouveau_gpuobj_ref *pramin_pt;
 	struct nouveau_gpuobj_ref *pramin_bar;
 	struct nouveau_gpuobj_ref *fb_bar;
-
-	bool last_access_wr;
 };
 
 #define NV50_INSTMEM_PAGE_SHIFT 12
@@ -262,16 +260,13 @@ nv50_instmem_init(struct drm_device *dev)
 
 	/* Assume that praying isn't enough, check that we can re-read the
 	 * entire fake channel back from the PRAMIN BAR */
-	dev_priv->engine.instmem.prepare_access(dev, false);
 	for (i = 0; i < c_size; i += 4) {
 		if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
 			NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
 				 i);
-			dev_priv->engine.instmem.finish_access(dev);
 			return -EINVAL;
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
 
 	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
 
@@ -451,13 +446,12 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 		vram |= 0x30;
 	}
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
 		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
 		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	nv_wr32(dev, 0x100c80, 0x00040001);
 	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
@@ -490,36 +484,21 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	pte = (gpuobj->im_pramin->start >> 12) << 1;
 	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
 		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	gpuobj->im_bound = 0;
 	return 0;
 }
 
 void
-nv50_instmem_prepare_access(struct drm_device *dev, bool write)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-
-	priv->last_access_wr = write;
-}
-
-void
-nv50_instmem_finish_access(struct drm_device *dev)
+nv50_instmem_flush(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-
-	if (priv->last_access_wr) {
-		nv_wr32(dev, 0x070000, 0x00000001);
-		if (!nv_wait(0x070000, 0x00000001, 0x00000000))
-			NV_ERROR(dev, "PRAMIN flush timeout\n");
-	}
+	nv_wr32(dev, 0x070000, 0x00000001);
+	if (!nv_wait(0x070000, 0x00000001, 0x00000000))
+		NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
 
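
The only real work the old interface ever did survives intact in nv50_instmem_flush(): the 0x070000 write-and-wait that finish_access() used to issue only when prepare_access() had recorded a write. The per-device last_access_wr bookkeeping in nv50_instmem_priv disappears with it. For reference, a plausible sketch of what nv_wait(reg, mask, val) is assumed to do throughout this patch; the real helper presumably uses a hardware timer for its timeout, and this bounded loop is illustrative only:

	/* assumed semantics, inferred from the nv_wait() call sites above */
	static bool nv_wait_sketch(struct drm_device *dev, uint32_t reg,
				   uint32_t mask, uint32_t val)
	{
		unsigned long budget = 2000000;	/* hypothetical iteration budget */

		while (budget--) {
			if ((nv_rd32(dev, reg) & mask) == val)
				return true;	/* bit(s) reached target state */
		}
		return false;			/* timed out */
	}
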