 drivers/gpu/drm/nouveau/nouveau_drv.h   |  2 --
 drivers/gpu/drm/nouveau/nouveau_sgdma.c | 38 +++-----------------------------------
 2 files changed, 3 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e1619c674380..d76d2c09049d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -672,8 +672,6 @@ struct drm_nouveau_private {
 		uint64_t aper_free;
 
 		struct nouveau_gpuobj *sg_ctxdma;
-		struct page *sg_dummy_page;
-		dma_addr_t sg_dummy_bus;
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac97007038..54af7608d45c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -144,19 +144,15 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 
 	pte = nvbe->pte_start;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
-
 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
 			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
+				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 				pte += 1;
 			} else {
 				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
 				pte += 2;
 			}
-
-			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
 	dev_priv->engine.instmem.flush(nvbe->dev);
@@ -218,7 +214,6 @@ int
 nouveau_sgdma_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_gpuobj *gpuobj = NULL;
 	uint32_t aper_size, obj_size;
 	int i, ret;
@@ -245,22 +240,6 @@ nouveau_sgdma_init(struct drm_device *dev)
 		return ret;
 	}
 
-	dev_priv->gart_info.sg_dummy_page =
-		alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
-	if (!dev_priv->gart_info.sg_dummy_page) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -ENOMEM;
-	}
-
-	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
-	dev_priv->gart_info.sg_dummy_bus =
-		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
-			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -EFAULT;
-	}
-
 	if (dev_priv->card_type < NV_50) {
 		/* special case, allocated from global instmem heap so
 		 * cinst is invalid, we use it on all channels though so
@@ -277,10 +256,8 @@ nouveau_sgdma_init(struct drm_device *dev)
 			       (NV_DMA_ACCESS_RW << 14) |
 			       (NV_DMA_TARGET_PCI << 16));
 		nv_wo32(gpuobj, 4, aper_size - 1);
-		for (i = 2; i < 2 + (aper_size >> 12); i++) {
-			nv_wo32(gpuobj, i * 4,
-				dev_priv->gart_info.sg_dummy_bus | 3);
-		}
+		for (i = 2; i < 2 + (aper_size >> 12); i++)
+			nv_wo32(gpuobj, i * 4, 0x00000000);
 	} else {
 		for (i = 0; i < obj_size; i += 8) {
 			nv_wo32(gpuobj, i + 0, 0x00000000);
@@ -301,15 +278,6 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->gart_info.sg_dummy_page) {
-		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
-			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		unlock_page(dev_priv->gart_info.sg_dummy_page);
-		__free_page(dev_priv->gart_info.sg_dummy_page);
-		dev_priv->gart_info.sg_dummy_page = NULL;
-		dev_priv->gart_info.sg_dummy_bus = 0;
-	}
-
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
 }
 
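For readers skimming the hunks: the net effect of this patch is that unbound GART PTEs are now written as zero (not present) instead of being pointed at a pinned dummy scratch page, which in turn removes the need to allocate, pci_map_page() and later tear down that page. Below is a minimal user-space sketch of the new unbind fill pattern; the nv_wo32() byte-offset write, NV_CTXDMA_PAGE_SIZE, and the 32-bit pre-NV50 versus 64-bit NV50+ PTE widths follow the diff, while pte_table and fill_unbound_ptes() are hypothetical names used only for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE           4096
#define NV_CTXDMA_PAGE_SIZE 4096   /* one PTE per CPU page in this config */

/* Hypothetical stand-in for the ctxdma gpuobj's PTE area. */
static uint32_t pte_table[64];

/* Mimics nv_wo32(): store a 32-bit word at a byte offset. */
static void nv_wo32(uint32_t *obj, unsigned int offset, uint32_t val)
{
	obj[offset / 4] = val;
}

/*
 * New unbind behaviour: every PTE covering the range is cleared to
 * zero (not present), rather than redirected at a dummy page as the
 * removed "dma_offset | 3" writes used to do.
 */
static void fill_unbound_ptes(unsigned int pte_start, unsigned int nr_pages,
			      int pre_nv50)
{
	unsigned int pte = pte_start, i, j;

	for (i = 0; i < nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (pre_nv50) {
				nv_wo32(pte_table, (pte * 4) + 0, 0x00000000);
				pte += 1;
			} else {
				/* NV50+ PTEs are 64 bits wide */
				nv_wo32(pte_table, (pte * 4) + 0, 0x00000000);
				nv_wo32(pte_table, (pte * 4) + 4, 0x00000000);
				pte += 2;
			}
		}
	}
}

int main(void)
{
	fill_unbound_ptes(0, 8, 1);
	printf("pte[0] after unbind: 0x%08x\n", pte_table[0]);
	return 0;
}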