about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/nouveau/nouveau_sgdma.c
diff options
context:
space:
mode:
author    Ben Skeggs <bskeggs@redhat.com>  2010-07-07 21:29:10 -0400
committer Ben Skeggs <bskeggs@redhat.com>  2010-07-12 20:13:40 -0400
commit f56cb86f9abd229418f894a8ffedfb9ff465c181 (patch)
tree   b29420af53d096e49ff573790c6161ed71599858 /drivers/gpu/drm/nouveau/nouveau_sgdma.c
parent 2107cce3056dccf37ae5cbfc95df348959b2c717 (diff)
drm/nouveau: add instmem flush() hook
This removes the previous prepare_access() and finish_access() hooks, and replaces them with a much simpler flush() hook. All the chipset-specific code before nv50 has its use removed completely, as it's not required there at all.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 12
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 1d6ee8b55154..1b2ab5a714ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
97 97
98 NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start); 98 NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
99 99
100 dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
101 pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT); 100 pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
102 nvbe->pte_start = pte; 101 nvbe->pte_start = pte;
103 for (i = 0; i < nvbe->nr_pages; i++) { 102 for (i = 0; i < nvbe->nr_pages; i++) {
@@ -116,7 +115,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
116 dma_offset += NV_CTXDMA_PAGE_SIZE; 115 dma_offset += NV_CTXDMA_PAGE_SIZE;
117 } 116 }
118 } 117 }
119 dev_priv->engine.instmem.finish_access(nvbe->dev); 118 dev_priv->engine.instmem.flush(nvbe->dev);
120 119
121 if (dev_priv->card_type == NV_50) { 120 if (dev_priv->card_type == NV_50) {
122 nv_wr32(dev, 0x100c80, 0x00050001); 121 nv_wr32(dev, 0x100c80, 0x00050001);
@@ -154,7 +153,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
154 if (!nvbe->bound) 153 if (!nvbe->bound)
155 return 0; 154 return 0;
156 155
157 dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
158 pte = nvbe->pte_start; 156 pte = nvbe->pte_start;
159 for (i = 0; i < nvbe->nr_pages; i++) { 157 for (i = 0; i < nvbe->nr_pages; i++) {
160 dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; 158 dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
@@ -170,7 +168,7 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
170 dma_offset += NV_CTXDMA_PAGE_SIZE; 168 dma_offset += NV_CTXDMA_PAGE_SIZE;
171 } 169 }
172 } 170 }
173 dev_priv->engine.instmem.finish_access(nvbe->dev); 171 dev_priv->engine.instmem.flush(nvbe->dev);
174 172
175 if (dev_priv->card_type == NV_50) { 173 if (dev_priv->card_type == NV_50) {
176 nv_wr32(dev, 0x100c80, 0x00050001); 174 nv_wr32(dev, 0x100c80, 0x00050001);
@@ -272,7 +270,6 @@ nouveau_sgdma_init(struct drm_device *dev)
272 pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, 270 pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
273 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 271 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
274 272
275 dev_priv->engine.instmem.prepare_access(dev, true);
276 if (dev_priv->card_type < NV_50) { 273 if (dev_priv->card_type < NV_50) {
277 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and 274 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
278 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE 275 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
@@ -294,7 +291,7 @@ nouveau_sgdma_init(struct drm_device *dev)
294 nv_wo32(dev, gpuobj, (i+4)/4, 0); 291 nv_wo32(dev, gpuobj, (i+4)/4, 0);
295 } 292 }
296 } 293 }
297 dev_priv->engine.instmem.finish_access(dev); 294 dev_priv->engine.instmem.flush(dev);
298 295
299 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; 296 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
300 dev_priv->gart_info.aper_base = 0; 297 dev_priv->gart_info.aper_base = 0;
@@ -325,14 +322,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
325{ 322{
326 struct drm_nouveau_private *dev_priv = dev->dev_private; 323 struct drm_nouveau_private *dev_priv = dev->dev_private;
327 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 324 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
328 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
329 int pte; 325 int pte;
330 326
331 pte = (offset >> NV_CTXDMA_PAGE_SHIFT); 327 pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
332 if (dev_priv->card_type < NV_50) { 328 if (dev_priv->card_type < NV_50) {
333 instmem->prepare_access(dev, false);
334 *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; 329 *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
335 instmem->finish_access(dev);
336 return 0; 330 return 0;
337 } 331 }
338 332