aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/gma500
diff options
context:
space:
mode:
authorPatrik Jakobsson <patrik.r.jakobsson@gmail.com>2014-01-04 16:11:17 -0500
committerPatrik Jakobsson <patrik.r.jakobsson@gmail.com>2014-03-17 15:11:53 -0400
commitae012bdc5799aafe88798f864bc05e90778229af (patch)
treef0b8360d13c718d2f3180e6d1c516405c25c4812 /drivers/gpu/drm/gma500
parent1c6b5d17d6ed124afd55027a72d64b6f6eca501e (diff)
drm/gma500: Hook up the MMU
Properly init the MMU and add MMU entries when adding GTT entries.

Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
Diffstat (limited to 'drivers/gpu/drm/gma500')
-rw-r--r--drivers/gpu/drm/gma500/gtt.c41
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c27
2 files changed, 55 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 2db731f00930..a30f6ee1f407 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -22,6 +22,7 @@
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <linux/shmem_fs.h> 23#include <linux/shmem_fs.h>
24#include "psb_drv.h" 24#include "psb_drv.h"
25#include "blitter.h"
25 26
26 27
27/* 28/*
@@ -105,11 +106,13 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
105 106
106 /* Write our page entries into the GTT itself */ 107 /* Write our page entries into the GTT itself */
107 for (i = r->roll; i < r->npage; i++) { 108 for (i = r->roll; i < r->npage; i++) {
108 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 109 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
110 PSB_MMU_CACHED_MEMORY);
109 iowrite32(pte, gtt_slot++); 111 iowrite32(pte, gtt_slot++);
110 } 112 }
111 for (i = 0; i < r->roll; i++) { 113 for (i = 0; i < r->roll; i++) {
112 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 114 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
115 PSB_MMU_CACHED_MEMORY);
113 iowrite32(pte, gtt_slot++); 116 iowrite32(pte, gtt_slot++);
114 } 117 }
115 /* Make sure all the entries are set before we return */ 118 /* Make sure all the entries are set before we return */
@@ -127,7 +130,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
127 * page table entries with the dummy page. This is protected via the gtt 130 * page table entries with the dummy page. This is protected via the gtt
128 * mutex which the caller must hold. 131 * mutex which the caller must hold.
129 */ 132 */
130static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) 133void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
131{ 134{
132 struct drm_psb_private *dev_priv = dev->dev_private; 135 struct drm_psb_private *dev_priv = dev->dev_private;
133 u32 __iomem *gtt_slot; 136 u32 __iomem *gtt_slot;
@@ -137,7 +140,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
137 WARN_ON(r->stolen); 140 WARN_ON(r->stolen);
138 141
139 gtt_slot = psb_gtt_entry(dev, r); 142 gtt_slot = psb_gtt_entry(dev, r);
140 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0); 143 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
144 PSB_MMU_CACHED_MEMORY);
141 145
142 for (i = 0; i < r->npage; i++) 146 for (i = 0; i < r->npage; i++)
143 iowrite32(pte, gtt_slot++); 147 iowrite32(pte, gtt_slot++);
@@ -176,11 +180,13 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
176 gtt_slot = psb_gtt_entry(dev, r); 180 gtt_slot = psb_gtt_entry(dev, r);
177 181
178 for (i = r->roll; i < r->npage; i++) { 182 for (i = r->roll; i < r->npage; i++) {
179 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
184 PSB_MMU_CACHED_MEMORY);
180 iowrite32(pte, gtt_slot++); 185 iowrite32(pte, gtt_slot++);
181 } 186 }
182 for (i = 0; i < r->roll; i++) { 187 for (i = 0; i < r->roll; i++) {
183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0); 188 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
189 PSB_MMU_CACHED_MEMORY);
184 iowrite32(pte, gtt_slot++); 190 iowrite32(pte, gtt_slot++);
185 } 191 }
186 ioread32(gtt_slot - 1); 192 ioread32(gtt_slot - 1);
@@ -240,6 +246,7 @@ int psb_gtt_pin(struct gtt_range *gt)
240 int ret = 0; 246 int ret = 0;
241 struct drm_device *dev = gt->gem.dev; 247 struct drm_device *dev = gt->gem.dev;
242 struct drm_psb_private *dev_priv = dev->dev_private; 248 struct drm_psb_private *dev_priv = dev->dev_private;
249 u32 gpu_base = dev_priv->gtt.gatt_start;
243 250
244 mutex_lock(&dev_priv->gtt_mutex); 251 mutex_lock(&dev_priv->gtt_mutex);
245 252
@@ -252,6 +259,9 @@ int psb_gtt_pin(struct gtt_range *gt)
252 psb_gtt_detach_pages(gt); 259 psb_gtt_detach_pages(gt);
253 goto out; 260 goto out;
254 } 261 }
262 psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
263 gt->pages, (gpu_base + gt->offset),
264 gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
255 } 265 }
256 gt->in_gart++; 266 gt->in_gart++;
257out: 267out:
@@ -274,16 +284,30 @@ void psb_gtt_unpin(struct gtt_range *gt)
274{ 284{
275 struct drm_device *dev = gt->gem.dev; 285 struct drm_device *dev = gt->gem.dev;
276 struct drm_psb_private *dev_priv = dev->dev_private; 286 struct drm_psb_private *dev_priv = dev->dev_private;
287 u32 gpu_base = dev_priv->gtt.gatt_start;
288 int ret;
277 289
290 /* While holding the gtt_mutex no new blits can be initiated */
278 mutex_lock(&dev_priv->gtt_mutex); 291 mutex_lock(&dev_priv->gtt_mutex);
279 292
293 /* Wait for any possible usage of the memory to be finished */
294 ret = gma_blt_wait_idle(dev_priv);
295 if (ret) {
296 DRM_ERROR("Failed to idle the blitter, unpin failed!");
297 goto out;
298 }
299
280 WARN_ON(!gt->in_gart); 300 WARN_ON(!gt->in_gart);
281 301
282 gt->in_gart--; 302 gt->in_gart--;
283 if (gt->in_gart == 0 && gt->stolen == 0) { 303 if (gt->in_gart == 0 && gt->stolen == 0) {
304 psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
305 (gpu_base + gt->offset), gt->npage, 0, 0);
284 psb_gtt_remove(dev, gt); 306 psb_gtt_remove(dev, gt);
285 psb_gtt_detach_pages(gt); 307 psb_gtt_detach_pages(gt);
286 } 308 }
309
310out:
287 mutex_unlock(&dev_priv->gtt_mutex); 311 mutex_unlock(&dev_priv->gtt_mutex);
288} 312}
289 313
@@ -497,6 +521,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
497 if (!resume) 521 if (!resume)
498 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, 522 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
499 stolen_size); 523 stolen_size);
524
500 if (!dev_priv->vram_addr) { 525 if (!dev_priv->vram_addr) {
501 dev_err(dev->dev, "Failure to map stolen base.\n"); 526 dev_err(dev->dev, "Failure to map stolen base.\n");
502 ret = -ENOMEM; 527 ret = -ENOMEM;
@@ -512,7 +537,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
512 dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n", 537 dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
513 num_pages, pfn_base << PAGE_SHIFT, 0); 538 num_pages, pfn_base << PAGE_SHIFT, 0);
514 for (i = 0; i < num_pages; ++i) { 539 for (i = 0; i < num_pages; ++i) {
515 pte = psb_gtt_mask_pte(pfn_base + i, 0); 540 pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
516 iowrite32(pte, dev_priv->gtt_map + i); 541 iowrite32(pte, dev_priv->gtt_map + i);
517 } 542 }
518 543
@@ -521,7 +546,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
521 */ 546 */
522 547
523 pfn_base = page_to_pfn(dev_priv->scratch_page); 548 pfn_base = page_to_pfn(dev_priv->scratch_page);
524 pte = psb_gtt_mask_pte(pfn_base, 0); 549 pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
525 for (; i < gtt_pages; ++i) 550 for (; i < gtt_pages; ++i)
526 iowrite32(pte, dev_priv->gtt_map + i); 551 iowrite32(pte, dev_priv->gtt_map + i);
527 552
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 55eef4d6cef8..89804fddb852 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -192,12 +192,18 @@ static int psb_do_init(struct drm_device *dev)
192 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0); 192 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
193 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1); 193 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
194 PSB_RSGX32(PSB_CR_BIF_BANK1); 194 PSB_RSGX32(PSB_CR_BIF_BANK1);
195 PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK, 195
196 PSB_CR_BIF_CTRL); 196 /* Do not bypass any MMU access, let them pagefault instead */
197 PSB_WSGX32((PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_MMU_ER_MASK),
198 PSB_CR_BIF_CTRL);
199 PSB_RSGX32(PSB_CR_BIF_CTRL);
200
197 psb_spank(dev_priv); 201 psb_spank(dev_priv);
198 202
199 /* mmu_gatt ?? */ 203 /* mmu_gatt ?? */
200 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE); 204 PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
205 PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); /* Post */
206
201 return 0; 207 return 0;
202out_err: 208out_err:
203 return ret; 209 return ret;
@@ -277,6 +283,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
277 int ret = -ENOMEM; 283 int ret = -ENOMEM;
278 struct drm_connector *connector; 284 struct drm_connector *connector;
279 struct gma_encoder *gma_encoder; 285 struct gma_encoder *gma_encoder;
286 struct psb_gtt *pg;
280 287
281 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 288 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
282 if (dev_priv == NULL) 289 if (dev_priv == NULL)
@@ -286,6 +293,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
286 dev_priv->dev = dev; 293 dev_priv->dev = dev;
287 dev->dev_private = (void *) dev_priv; 294 dev->dev_private = (void *) dev_priv;
288 295
296 pg = &dev_priv->gtt;
297
289 pci_set_master(dev->pdev); 298 pci_set_master(dev->pdev);
290 299
291 dev_priv->num_pipe = dev_priv->ops->pipes; 300 dev_priv->num_pipe = dev_priv->ops->pipes;
@@ -355,13 +364,21 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
355 if (!dev_priv->pf_pd) 364 if (!dev_priv->pf_pd)
356 goto out_err; 365 goto out_err;
357 366
358 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
359 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
360
361 ret = psb_do_init(dev); 367 ret = psb_do_init(dev);
362 if (ret) 368 if (ret)
363 return ret; 369 return ret;
364 370
371 /* Add stolen memory to SGX MMU */
372 down_read(&pg->sem);
373 ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
374 dev_priv->stolen_base >> PAGE_SHIFT,
375 pg->gatt_start,
376 pg->stolen_size >> PAGE_SHIFT, 0);
377 up_read(&pg->sem);
378
379 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
380 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
381
365 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE); 382 PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
366 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE); 383 PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
367 384