author     Patrik Jakobsson <patrik.r.jakobsson@gmail.com>  2014-01-01 19:27:00 -0500
committer  Patrik Jakobsson <patrik.r.jakobsson@gmail.com>  2014-03-17 15:04:01 -0400
commit     b219372dff810fec82c7671b93e1f8dc05e10af4 (patch)
tree       8cf2d0e87e0c54e6f324f14081e68c9f6020169f /drivers/gpu/drm/gma500
parent     786a7828bc74b9b1466e83abb200b75f80f94121 (diff)
drm/gma500: Make SGX MMU driver actually do something
The old MMU code never wrote PDs or PTEs to any registers. Now we do, and that's a good start.

Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
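For orientation before the diff: the two register paths this patch adds are (1) psb_mmu_set_pd_context(), which now writes the page directory's physical address into the BIF directory-list base register, and (2) the flush paths, which pulse the invalidate/flush bits in PSB_CR_BIF_CTRL. A condensed sketch of those writes, lifted from the mmu.c hunks below (locking and the dev_priv plumbing behind PSB_WSGX32/PSB_RSGX32 are omitted, so treat this as illustrative rather than drop-in code):

    /* Point the SGX MMU at the page directory: write its physical
     * address into the per-context directory-list base register. */
    PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT,
               (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
               PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4);
    wmb();

    /* TLB/directory-cache invalidate: set the INVALDC bit, let the write
     * land, clear it again, and post the write with a read-back. */
    val = PSB_RSGX32(PSB_CR_BIF_CTRL);
    PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
    wmb();
    PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
    (void)PSB_RSGX32(PSB_CR_BIF_CTRL);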
Diffstat (limited to 'drivers/gpu/drm/gma500')
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c      | 258
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c  |   4
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h  |  10
3 files changed, 138 insertions(+), 134 deletions(-)
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 49bac41beefb..0bfc9f7b343b 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -59,15 +59,14 @@ struct psb_mmu_driver {
         spinlock_t lock;
 
         atomic_t needs_tlbflush;
-
-        uint8_t __iomem *register_map;
+        atomic_t *msvdx_mmu_invaldc;
         struct psb_mmu_pd *default_pd;
-        /*uint32_t bif_ctrl;*/
+        uint32_t bif_ctrl;
         int has_clflush;
         int clflush_add;
         unsigned long clflush_mask;
 
-        struct drm_psb_private *dev_priv;
+        struct drm_device *dev;
 };
 
 struct psb_mmu_pd;
@@ -102,13 +101,13 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
         return offset >> PSB_PDE_SHIFT;
 }
 
+#if defined(CONFIG_X86)
 static inline void psb_clflush(void *addr)
 {
         __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
 }
 
-static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
-                                   void *addr)
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
 {
         if (!driver->has_clflush)
                 return;
@@ -117,62 +116,77 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
         psb_clflush(addr);
         mb();
 }
+#else
 
-static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
-{
-        uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
-        uint32_t clflush_count = PAGE_SIZE / clflush_add;
-        int i;
-        uint8_t *clf;
-
-        clf = kmap_atomic(page);
-        mb();
-        for (i = 0; i < clflush_count; ++i) {
-                psb_clflush(clf);
-                clf += clflush_add;
-        }
-        mb();
-        kunmap_atomic(clf);
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
+{;
 }
 
-static void psb_pages_clflush(struct psb_mmu_driver *driver,
-                              struct page *page[], unsigned long num_pages)
-{
-        int i;
-
-        if (!driver->has_clflush)
-                return ;
-
-        for (i = 0; i < num_pages; i++)
-                psb_page_clflush(driver, *page++);
-}
+#endif
 
-static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
-                                    int force)
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
 {
+        struct drm_device *dev = driver->dev;
+        struct drm_psb_private *dev_priv = dev->dev_private;
+
+        if (atomic_read(&driver->needs_tlbflush) || force) {
+                uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
+                PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+
+                /* Make sure data cache is turned off before enabling it */
+                wmb();
+                PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+                (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+                if (driver->msvdx_mmu_invaldc)
+                        atomic_set(driver->msvdx_mmu_invaldc, 1);
+        }
         atomic_set(&driver->needs_tlbflush, 0);
 }
 
+#if 0
 static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
 {
         down_write(&driver->sem);
         psb_mmu_flush_pd_locked(driver, force);
         up_write(&driver->sem);
 }
+#endif
 
-void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+void psb_mmu_flush(struct psb_mmu_driver *driver)
 {
-        if (rc_prot)
-                down_write(&driver->sem);
-        if (rc_prot)
-                up_write(&driver->sem);
+        struct drm_device *dev = driver->dev;
+        struct drm_psb_private *dev_priv = dev->dev_private;
+        uint32_t val;
+
+        down_write(&driver->sem);
+        val = PSB_RSGX32(PSB_CR_BIF_CTRL);
+        if (atomic_read(&driver->needs_tlbflush))
+                PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
+        else
+                PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);
+
+        /* Make sure data cache is turned off and MMU is flushed before
+           restoring bank interface control register */
+        wmb();
+        PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
+                   PSB_CR_BIF_CTRL);
+        (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+        atomic_set(&driver->needs_tlbflush, 0);
+        if (driver->msvdx_mmu_invaldc)
+                atomic_set(driver->msvdx_mmu_invaldc, 1);
+        up_write(&driver->sem);
 }
 
 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
 {
-        /*ttm_tt_cache_flush(&pd->p, 1);*/
-        psb_pages_clflush(pd->driver, &pd->p, 1);
+        struct drm_device *dev = pd->driver->dev;
+        struct drm_psb_private *dev_priv = dev->dev_private;
+        uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
+                          PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
+
         down_write(&pd->driver->sem);
+        PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
         wmb();
         psb_mmu_flush_pd_locked(pd->driver, 1);
         pd->hw_context = hw_context;
@@ -183,7 +197,6 @@ void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
 static inline unsigned long psb_pd_addr_end(unsigned long addr,
                                             unsigned long end)
 {
-
         addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
         return (addr < end) ? addr : end;
 }
@@ -223,12 +236,10 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                 goto out_err3;
 
         if (!trap_pagefaults) {
-                pd->invalid_pde =
-                    psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
-                                     invalid_type);
-                pd->invalid_pte =
-                    psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
-                                     invalid_type);
+                pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+                                                   invalid_type);
+                pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+                                                   invalid_type);
         } else {
                 pd->invalid_pde = 0;
                 pd->invalid_pte = 0;
@@ -279,12 +290,16 @@ static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
 {
         struct psb_mmu_driver *driver = pd->driver;
+        struct drm_device *dev = driver->dev;
+        struct drm_psb_private *dev_priv = dev->dev_private;
         struct psb_mmu_pt *pt;
         int i;
 
         down_write(&driver->sem);
-        if (pd->hw_context != -1)
+        if (pd->hw_context != -1) {
+                PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
                 psb_mmu_flush_pd_locked(driver, 1);
+        }
 
         /* Should take the spinlock here, but we don't need to do that
            since we have the semaphore in write mode. */
@@ -331,7 +346,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
         for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                 *ptes++ = pd->invalid_pte;
 
-
+#if defined(CONFIG_X86)
         if (pd->driver->has_clflush && pd->hw_context != -1) {
                 mb();
                 for (i = 0; i < clflush_count; ++i) {
@@ -340,7 +355,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
                 }
                 mb();
         }
-
+#endif
         kunmap_atomic(v);
         spin_unlock(lock);
 
@@ -351,7 +366,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
         return pt;
 }
 
-static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                                              unsigned long addr)
 {
         uint32_t index = psb_mmu_pd_index(addr);
@@ -383,7 +398,7 @@ static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                 kunmap_atomic((void *) v);
 
                 if (pd->hw_context != -1) {
-                        psb_mmu_clflush(pd->driver, (void *) &v[index]);
+                        psb_mmu_clflush(pd->driver, (void *)&v[index]);
                         atomic_set(&pd->driver->needs_tlbflush, 1);
                 }
         }
@@ -420,8 +435,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
                 pd->tables[pt->index] = NULL;
 
                 if (pd->hw_context != -1) {
-                        psb_mmu_clflush(pd->driver,
-                                        (void *) &v[pt->index]);
+                        psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
                         atomic_set(&pd->driver->needs_tlbflush, 1);
                 }
                 kunmap_atomic(pt->v);
@@ -432,8 +446,8 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
         spin_unlock(&pd->driver->lock);
 }
 
-static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
-                                   unsigned long addr, uint32_t pte)
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
+                                   uint32_t pte)
 {
         pt->v[psb_mmu_pt_index(addr)] = pte;
 }
@@ -444,69 +458,50 @@ static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
         pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
 }
 
-
-void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
-                        uint32_t mmu_offset, uint32_t gtt_start,
-                        uint32_t gtt_pages)
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
 {
-        uint32_t *v;
-        uint32_t start = psb_mmu_pd_index(mmu_offset);
-        struct psb_mmu_driver *driver = pd->driver;
-        int num_pages = gtt_pages;
+        struct psb_mmu_pd *pd;
 
         down_read(&driver->sem);
-        spin_lock(&driver->lock);
-
-        v = kmap_atomic(pd->p);
-        v += start;
-
-        while (gtt_pages--) {
-                *v++ = gtt_start | pd->pd_mask;
-                gtt_start += PAGE_SIZE;
-        }
-
-        /*ttm_tt_cache_flush(&pd->p, num_pages);*/
-        psb_pages_clflush(pd->driver, &pd->p, num_pages);
-        kunmap_atomic(v);
-        spin_unlock(&driver->lock);
-
-        if (pd->hw_context != -1)
-                atomic_set(&pd->driver->needs_tlbflush, 1);
+        pd = driver->default_pd;
+        up_read(&driver->sem);
 
-        up_read(&pd->driver->sem);
-        psb_mmu_flush_pd(pd->driver, 0);
+        return pd;
 }
 
-struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
 {
         struct psb_mmu_pd *pd;
 
-        /* down_read(&driver->sem); */
-        pd = driver->default_pd;
-        /* up_read(&driver->sem); */
-
-        return pd;
+        pd = psb_mmu_get_default_pd(driver);
+        return page_to_pfn(pd->p) << PAGE_SHIFT;
 }
 
 void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
 {
+        struct drm_device *dev = driver->dev;
+        struct drm_psb_private *dev_priv = dev->dev_private;
+
+        PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
         psb_mmu_free_pagedir(driver->default_pd);
         kfree(driver);
 }
 
-struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
                                            int trap_pagefaults,
                                            int invalid_type,
-                                           struct drm_psb_private *dev_priv)
+                                           atomic_t *msvdx_mmu_invaldc)
 {
         struct psb_mmu_driver *driver;
+        struct drm_psb_private *dev_priv = dev->dev_private;
 
         driver = kmalloc(sizeof(*driver), GFP_KERNEL);
 
         if (!driver)
                 return NULL;
-        driver->dev_priv = dev_priv;
 
+        driver->dev = dev;
         driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
                                               invalid_type);
         if (!driver->default_pd)
@@ -515,17 +510,24 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
         spin_lock_init(&driver->lock);
         init_rwsem(&driver->sem);
         down_write(&driver->sem);
-        driver->register_map = registers;
         atomic_set(&driver->needs_tlbflush, 1);
+        driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
+
+        driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
+        PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
+                   PSB_CR_BIF_CTRL);
+        PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
+                   PSB_CR_BIF_CTRL);
 
         driver->has_clflush = 0;
 
+#if defined(CONFIG_X86)
         if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
                 uint32_t tfms, misc, cap0, cap4, clflush_size;
 
                 /*
-                 * clflush size is determined at kernel setup for x86_64
-                 * but not for i386. We have to do it here.
+                 * clflush size is determined at kernel setup for x86_64 but not
+                 * for i386. We have to do it here.
                  */
 
                 cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
@@ -536,6 +538,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
                 driver->clflush_mask = driver->clflush_add - 1;
                 driver->clflush_mask = ~driver->clflush_mask;
         }
+#endif
 
         up_write(&driver->sem);
         return driver;
@@ -545,9 +548,9 @@ out_err1:
         return NULL;
 }
 
-static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
-                               unsigned long address, uint32_t num_pages,
-                               uint32_t desired_tile_stride,
-                               uint32_t hw_tile_stride)
+#if defined(CONFIG_X86)
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+                               uint32_t num_pages, uint32_t desired_tile_stride,
+                               uint32_t hw_tile_stride)
 {
         struct psb_mmu_pt *pt;
@@ -561,11 +564,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
         unsigned long clflush_add = pd->driver->clflush_add;
         unsigned long clflush_mask = pd->driver->clflush_mask;
 
-        if (!pd->driver->has_clflush) {
-                /*ttm_tt_cache_flush(&pd->p, num_pages);*/
-                psb_pages_clflush(pd->driver, &pd->p, num_pages);
+        if (!pd->driver->has_clflush)
                 return;
-        }
 
         if (hw_tile_stride)
                 rows = num_pages / desired_tile_stride;
@@ -586,10 +586,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
                         if (!pt)
                                 continue;
                         do {
-                                psb_clflush(&pt->v
-                                            [psb_mmu_pt_index(addr)]);
-                        } while (addr +=
-                                 clflush_add,
+                                psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
+                        } while (addr += clflush_add,
                                  (addr & clflush_mask) < next);
 
                         psb_mmu_pt_unmap_unlock(pt);
@@ -598,6 +596,14 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
         }
         mb();
 }
+#else
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
+                               uint32_t num_pages, uint32_t desired_tile_stride,
+                               uint32_t hw_tile_stride)
+{
+        drm_ttm_cache_flush();
+}
+#endif
 
 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                  unsigned long address, uint32_t num_pages)
@@ -633,7 +639,7 @@ out:
         up_read(&pd->driver->sem);
 
         if (pd->hw_context != -1)
-                psb_mmu_flush(pd->driver, 0);
+                psb_mmu_flush(pd->driver);
 
         return;
 }
@@ -660,7 +666,7 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
         add = desired_tile_stride << PAGE_SHIFT;
         row_add = hw_tile_stride << PAGE_SHIFT;
 
-        /* down_read(&pd->driver->sem); */
+        down_read(&pd->driver->sem);
 
         /* Make sure we only need to flush this processor's cache */
 
@@ -688,10 +694,10 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
                 psb_mmu_flush_ptes(pd, f_address, num_pages,
                                    desired_tile_stride, hw_tile_stride);
 
-        /* up_read(&pd->driver->sem); */
+        up_read(&pd->driver->sem);
 
         if (pd->hw_context != -1)
-                psb_mmu_flush(pd->driver, 0);
+                psb_mmu_flush(pd->driver);
 }
 
 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
@@ -704,7 +710,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
         unsigned long end;
         unsigned long next;
         unsigned long f_address = address;
-        int ret = 0;
+        int ret = -ENOMEM;
 
         down_read(&pd->driver->sem);
 
@@ -726,6 +732,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
                 psb_mmu_pt_unmap_unlock(pt);
 
         } while (addr = next, next != end);
+        ret = 0;
 
 out:
         if (pd->hw_context != -1)
@@ -734,15 +741,15 @@ out:
         up_read(&pd->driver->sem);
 
         if (pd->hw_context != -1)
-                psb_mmu_flush(pd->driver, 1);
+                psb_mmu_flush(pd->driver);
 
-        return ret;
+        return 0;
 }
 
 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                          unsigned long address, uint32_t num_pages,
-                         uint32_t desired_tile_stride,
-                         uint32_t hw_tile_stride, int type)
+                         uint32_t desired_tile_stride, uint32_t hw_tile_stride,
+                         int type)
 {
         struct psb_mmu_pt *pt;
         uint32_t rows = 1;
@@ -754,7 +761,7 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
         unsigned long add;
         unsigned long row_add;
         unsigned long f_address = address;
-        int ret = 0;
+        int ret = -ENOMEM;
 
         if (hw_tile_stride) {
                 if (num_pages % desired_tile_stride != 0)
@@ -777,14 +784,11 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                 do {
                         next = psb_pd_addr_end(addr, end);
                         pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-                        if (!pt) {
-                                ret = -ENOMEM;
+                        if (!pt)
                                 goto out;
-                        }
                         do {
-                                pte =
-                                    psb_mmu_mask_pte(page_to_pfn(*pages++),
-                                                     type);
+                                pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
+                                                       type);
                                 psb_mmu_set_pte(pt, addr, pte);
                                 pt->count++;
                         } while (addr += PAGE_SIZE, addr < next);
@@ -794,6 +798,8 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
 
                 address += row_add;
         }
+
+        ret = 0;
 out:
         if (pd->hw_context != -1)
                 psb_mmu_flush_ptes(pd, f_address, num_pages,
@@ -802,7 +808,7 @@ out:
         up_read(&pd->driver->sem);
 
         if (pd->hw_context != -1)
-                psb_mmu_flush(pd->driver, 1);
+                psb_mmu_flush(pd->driver);
 
         return ret;
 }
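A side note on the two insert paths changed above (psb_mmu_insert_pfn_sequence() and psb_mmu_insert_pages()): ret now starts out as -ENOMEM and is cleared to 0 only after the mapping loop completes, so every early goto out returns failure without assigning an error code at each site. A minimal, self-contained illustration of that idiom (hypothetical names, plain userspace C, not gma500 code):

    #include <stdio.h>

    /* Hypothetical helper: pretend the allocation for slot 3 fails. */
    static int alloc_slot(int i)
    {
            return (i == 3) ? -1 : 0;
    }

    static int fill_table(int n)
    {
            int ret = -1;   /* assume failure up front (the driver uses -ENOMEM) */
            int i;

            for (i = 0; i < n; i++)
                    if (alloc_slot(i))
                            goto out;       /* ret already holds the error */

            ret = 0;        /* reached only when the whole loop succeeded */
    out:
            /* common cleanup/flush would happen here in the driver */
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", fill_table(3), fill_table(5));  /* prints: 0 -1 */
            return 0;
    }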
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1199180667c9..55eef4d6cef8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -347,9 +347,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
         if (ret)
                 goto out_err;
 
-        dev_priv->mmu = psb_mmu_driver_init((void *)0,
-                                            drm_psb_trap_pagefaults, 0,
-                                            dev_priv);
+        dev_priv->mmu = psb_mmu_driver_init(dev, drm_psb_trap_pagefaults, 0, 0);
         if (!dev_priv->mmu)
                 goto out_err;
 
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 5ad6a03e477e..ac8cc1989b94 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -727,10 +727,10 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
  * MMU stuff.
  */
 
-extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
                                         int trap_pagefaults,
                                         int invalid_type,
-                                        struct drm_psb_private *dev_priv);
+                                        atomic_t *msvdx_mmu_invaldc);
 extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
 extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
                                                  *driver);
@@ -740,7 +740,7 @@ extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                                            int trap_pagefaults,
                                            int invalid_type);
 extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
-extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
 extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                         unsigned long address,
                                         uint32_t num_pages);