Diffstat (limited to 'drivers/gpu/drm/gma500/mmu.c')
 drivers/gpu/drm/gma500/mmu.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index c904d73b1de3..e80ee82f6caf 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -125,14 +125,14 @@ static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
 	int i;
 	uint8_t *clf;
 
-	clf = kmap_atomic(page, KM_USER0);
+	clf = kmap_atomic(page);
 	mb();
 	for (i = 0; i < clflush_count; ++i) {
 		psb_clflush(clf);
 		clf += clflush_add;
 	}
 	mb();
-	kunmap_atomic(clf, KM_USER0);
+	kunmap_atomic(clf);
 }
 
 static void psb_pages_clflush(struct psb_mmu_driver *driver,
@@ -325,7 +325,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 
 	spin_lock(lock);
 
-	v = kmap_atomic(pt->p, KM_USER0);
+	v = kmap_atomic(pt->p);
 	clf = (uint8_t *) v;
 	ptes = (uint32_t *) v;
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -341,7 +341,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 		mb();
 	}
 
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(lock);
 
 	pt->count = 0;
@@ -376,18 +376,18 @@ struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
 			continue;
 		}
 
-		v = kmap_atomic(pd->p, KM_USER0);
+		v = kmap_atomic(pd->p);
 		pd->tables[index] = pt;
 		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
 		pt->index = index;
-		kunmap_atomic((void *) v, KM_USER0);
+		kunmap_atomic((void *) v);
 
 		if (pd->hw_context != -1) {
 			psb_mmu_clflush(pd->driver, (void *) &v[index]);
 			atomic_set(&pd->driver->needs_tlbflush, 1);
 		}
 	}
-	pt->v = kmap_atomic(pt->p, KM_USER0);
+	pt->v = kmap_atomic(pt->p);
 	return pt;
 }
 
@@ -404,7 +404,7 @@ static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
 		spin_unlock(lock);
 		return NULL;
 	}
-	pt->v = kmap_atomic(pt->p, KM_USER0);
+	pt->v = kmap_atomic(pt->p);
 	return pt;
 }
 
@@ -413,9 +413,9 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
 	struct psb_mmu_pd *pd = pt->pd;
 	uint32_t *v;
 
-	kunmap_atomic(pt->v, KM_USER0);
+	kunmap_atomic(pt->v);
 	if (pt->count == 0) {
-		v = kmap_atomic(pd->p, KM_USER0);
+		v = kmap_atomic(pd->p);
 		v[pt->index] = pd->invalid_pde;
 		pd->tables[pt->index] = NULL;
 
@@ -424,7 +424,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
 					(void *) &v[pt->index]);
 			atomic_set(&pd->driver->needs_tlbflush, 1);
 		}
-		kunmap_atomic(pt->v, KM_USER0);
+		kunmap_atomic(pt->v);
 		spin_unlock(&pd->driver->lock);
 		psb_mmu_free_pt(pt);
 		return;
@@ -457,7 +457,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
 	down_read(&driver->sem);
 	spin_lock(&driver->lock);
 
-	v = kmap_atomic(pd->p, KM_USER0);
+	v = kmap_atomic(pd->p);
 	v += start;
 
 	while (gtt_pages--) {
@@ -467,7 +467,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
 
 	/*ttm_tt_cache_flush(&pd->p, num_pages);*/
 	psb_pages_clflush(pd->driver, &pd->p, num_pages);
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(&driver->lock);
 
 	if (pd->hw_context != -1)
@@ -830,9 +830,9 @@ int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
 	uint32_t *v;
 
 	spin_lock(lock);
-	v = kmap_atomic(pd->p, KM_USER0);
+	v = kmap_atomic(pd->p);
 	tmp = v[psb_mmu_pd_index(virtual)];
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(lock);
 
 	if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
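
Every hunk above applies the same mechanical conversion: kmap_atomic() and kunmap_atomic() no longer take a KM_USER0 slot argument, because atomic kmap slots are now managed on a per-CPU stack rather than chosen by the caller via km_type. A minimal sketch of the resulting call-site pattern follows; it assumes a kernel with the stack-based API, and touch_page() is a hypothetical illustration, not a function from this driver:

	/*
	 * Sketch of the one-argument kmap_atomic pattern, assuming
	 * <linux/highmem.h> on a kernel where km_type/KM_USER0 is gone.
	 */
	#include <linux/highmem.h>

	static void touch_page(struct page *page)
	{
		uint8_t *addr;

		addr = kmap_atomic(page);  /* was: kmap_atomic(page, KM_USER0) */
		addr[0] ^= 0xff;           /* short, non-sleeping access only */
		kunmap_atomic(addr);       /* was: kunmap_atomic(addr, KM_USER0) */
	}

With the slot argument gone, nested atomic mappings no longer need distinct KM_* slots; nesting depth is tracked per CPU, which is why each call site in this diff simply drops its second argument.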