author		Rob Clark <rob@ti.com>	2012-03-05 11:48:40 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-03-07 16:38:08 -0500
commit		e559895ae99fb99ecb91d5ad9e0b742ad881b341 (patch)
tree		aac7767b9ca265d648e5d8a58af3890b453eca9f /drivers
parent		5c137797377db64d4cfb429ee8e4a420d9743660 (diff)
staging: drm/omap: mmap of tiled buffers with stride >4kb

Deal with the case of buffers with virtual stride larger than one
page in fault_2d().

Signed-off-by: Rob Clark <rob@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
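The crux of the change is the virtual stride in pages, m: a tiled row wider than one page occupies m pages of virtual address space. A standalone sketch of that arithmetic with assumed example values (illustrative only, not code from the tree):

	#include <stdio.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		/* assumed example: 3000-pixel-wide buffer, fmt shift 1 (16bpp) */
		unsigned int width = 3000, fmt = 1;

		/* virtual stride in pages, as the patch computes it */
		int m = 1 + ((width << fmt) / PAGE_SIZE);

		printf("row = %u bytes -> m = %d page(s)\n", width << fmt, m);
		return 0;	/* prints: row = 6000 bytes -> m = 2 page(s) */
	}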
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/staging/omapdrm/omap_gem.c	86
1 file changed, 59 insertions(+), 27 deletions(-)
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index 5abd294ab2e..921f058cc6a 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -153,10 +153,23 @@ static void evict_entry(struct drm_gem_object *obj,
 		enum tiler_fmt fmt, struct usergart_entry *entry)
 {
 	if (obj->dev->dev_mapping) {
-		size_t size = PAGE_SIZE * usergart[fmt].height;
+		struct omap_gem_object *omap_obj = to_omap_bo(obj);
+		int n = usergart[fmt].height;
+		size_t size = PAGE_SIZE * n;
 		loff_t off = mmap_offset(obj) +
 				(entry->obj_pgoff << PAGE_SHIFT);
-		unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+		const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+		if (m > 1) {
+			int i;
+			/* if stride > than PAGE_SIZE then sparse mapping: */
+			for (i = n; i > 0; i--) {
+				unmap_mapping_range(obj->dev->dev_mapping,
+						off, PAGE_SIZE, 1);
+				off += PAGE_SIZE * m;
+			}
+		} else {
+			unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+		}
 	}
 
 	entry->obj = NULL;
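Since a wide-stride entry only backs one page per virtual row, eviction walks the n rows and unmaps a single page in each, hopping m pages forward, rather than issuing one contiguous unmap. A standalone sketch with assumed values of the offsets the sparse branch above visits:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		int n = 64, m = 2;		/* slot height, virtual stride in pages */
		unsigned long off = 0x100000;	/* assumed mmap offset of the entry */
		int i;

		/* same walk as the sparse branch: one page per row */
		for (i = n; i > 0; i--) {
			printf("unmap 1 page at offset 0x%lx\n", off);
			off += PAGE_SIZE * m;	/* hop over the rest of the wide row */
		}
		return 0;
	}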
@@ -342,26 +355,39 @@ static int fault_2d(struct drm_gem_object *obj,
 	void __user *vaddr;
 	int i, ret, slots;
 
-	if (!usergart)
-		return -EFAULT;
-
-	/* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
-	 * that are wider than 4kb
+	/*
+	 * Note the height of the slot is also equal to the number of pages
+	 * that need to be mapped in to fill 4kb wide CPU page. If the slot
+	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
+	 */
+	const int n = usergart[fmt].height;
+	const int n_shift = usergart[fmt].height_shift;
+
+	/*
+	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
+	 * rounded up to next multiple of PAGE_SIZE.. this need to be taken
+	 * into account in some of the math, so figure out virtual stride
+	 * in pages
 	 */
+	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
 	pgoff = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
 
-	/* actual address we start mapping at is rounded down to previous slot
+	/*
+	 * Actual address we start mapping at is rounded down to previous slot
 	 * boundary in the y direction:
 	 */
-	base_pgoff = round_down(pgoff, usergart[fmt].height);
-	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
-	entry = &usergart[fmt].entry[usergart[fmt].last];
+	base_pgoff = round_down(pgoff, m << n_shift);
 
+	/* figure out buffer width in slots */
 	slots = omap_obj->width >> usergart[fmt].slot_shift;
 
+	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+
+	entry = &usergart[fmt].entry[usergart[fmt].last];
+
 	/* evict previous buffer using this usergart entry, if any: */
 	if (entry->obj)
 		evict_entry(entry->obj, fmt, entry);
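A slot-row now covers m << n_shift virtual pages, so the fault offset is rounded down to that granularity before mapping. A standalone sketch with assumed values; round_down here is a simplified stand-in for the kernel macro (both require a power-of-two alignment):

	#include <stdio.h>

	/* simplified round_down: like the kernel macro, y must be a power of two */
	#define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))

	int main(void)
	{
		int m = 2, n_shift = 6;		/* 2-page stride, 64-row slots */
		unsigned long pgoff = 300;	/* assumed faulting page offset */

		/* one slot-row spans m << n_shift = 128 virtual pages */
		unsigned long base_pgoff = round_down(pgoff, m << n_shift);

		printf("pgoff %lu rounds down to base_pgoff %lu\n", pgoff, base_pgoff);
		return 0;	/* prints: pgoff 300 rounds down to base_pgoff 256 */
	}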
@@ -369,23 +395,30 @@ static int fault_2d(struct drm_gem_object *obj,
 	entry->obj = obj;
 	entry->obj_pgoff = base_pgoff;
 
-	/* now convert base_pgoff to phys offset from virt offset:
-	 */
-	base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;
-
-	/* map in pages. Note the height of the slot is also equal to the
-	 * number of pages that need to be mapped in to fill 4kb wide CPU page.
-	 * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
-	 * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
-	 * get a dummy page mapped in.. if someone reads/writes it they will get
-	 * random/undefined content, but at least it won't be corrupting
-	 * whatever other random page used to be mapped in, or other undefined
-	 * behavior.
+	/* now convert base_pgoff to phys offset from virt offset: */
+	base_pgoff = (base_pgoff >> n_shift) * slots;
+
+	/* for wider-than 4k.. figure out which part of the slot-row we want: */
+	if (m > 1) {
+		int off = pgoff % m;
+		entry->obj_pgoff += off;
+		base_pgoff /= m;
+		slots = min(slots - (off << n_shift), n);
+		base_pgoff += off << n_shift;
+		vaddr += off << PAGE_SHIFT;
+	}
+
+	/*
+	 * Map in pages. Beyond the valid pixel part of the buffer, we set
+	 * pages[i] to NULL to get a dummy page mapped in.. if someone
+	 * reads/writes it they will get random/undefined content, but at
+	 * least it won't be corrupting whatever other random page used to
+	 * be mapped in, or other undefined behavior.
 	 */
 	memcpy(pages, &omap_obj->pages[base_pgoff],
 			sizeof(struct page *) * slots);
 	memset(pages + slots, 0,
-			sizeof(struct page *) * (usergart[fmt].height - slots));
+			sizeof(struct page *) * (n - slots));
 
 	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
 	if (ret) {
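When m > 1, one fault can only service one page-column of the wide slot-row, so pgoff % m selects the column and the slot count is clamped accordingly. A standalone sketch of that selection with assumed values (min() is a local helper standing in for the kernel's):

	#include <stdio.h>

	static int min(int a, int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		int m = 2, n_shift = 6, n = 1 << n_shift;
		int slots = 100;		/* assumed buffer width in slots */
		unsigned long pgoff = 301;	/* assumed faulting page offset */

		int off = pgoff % m;		/* page-column within the wide row */
		/* valid page pointers left for this column, at most one slot height */
		slots = min(slots - (off << n_shift), n);

		printf("column %d: copy %d page pointer(s)\n", off, slots);
		return 0;	/* prints: column 1: copy 36 page pointer(s) */
	}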
@@ -393,16 +426,15 @@ static int fault_2d(struct drm_gem_object *obj,
 		return ret;
 	}
 
-	i = usergart[fmt].height;
 	pfn = entry->paddr >> PAGE_SHIFT;
 
 	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
 			pfn, pfn << PAGE_SHIFT);
 
-	while (i--) {
+	for (i = n; i > 0; i--) {
 		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
 		pfn += usergart[fmt].stride_pfn;
-		vaddr += PAGE_SIZE;
+		vaddr += PAGE_SIZE * m;
 	}
 
 	/* simple round-robin: */
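The insertion loop now steps the user address by m pages per row, so only the faulting column of each wide row gets a pfn; neighbouring columns fault separately. A standalone sketch with assumed values of the addresses such a loop visits:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		int n = 4, m = 2;		/* tiny slot height for readability */
		unsigned long vaddr = 0x10000;	/* assumed fault-adjusted address */
		unsigned long pfn = 0x800;	/* assumed first pfn of the entry */
		unsigned long stride_pfn = 16;	/* assumed tiled row pitch in pfns */
		int i;

		for (i = n; i > 0; i--) {
			printf("insert pfn 0x%lx at vaddr 0x%lx\n", pfn, vaddr);
			pfn += stride_pfn;	/* advance one tiled row */
			vaddr += PAGE_SIZE * m;	/* skip past the rest of the wide row */
		}
		return 0;
	}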