Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c'):

 drivers/gpu/drm/omapdrm/omap_gem.c | 51 +++++++++++++++++++++++----------------------------
 1 file changed, 23 insertions(+), 28 deletions(-)
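
The change below replaces the driver-local errno-to-VM_FAULT_* translation with the generic vmf_error() helper and moves from vm_insert_mixed() to vmf_insert_mixed(), which already returns a vm_fault_t. For orientation, the helper behaves roughly as in the following sketch (see include/linux/mm.h in the matching kernel tree for the authoritative definition):

/*
 * Sketch of the generic helper the diff relies on (include/linux/mm.h).
 * It collapses the old per-driver switch: -ENOMEM becomes VM_FAULT_OOM,
 * any other error becomes VM_FAULT_SIGBUS.
 */
static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}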
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 17a53d207978..6030de7ec2ba 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -371,7 +371,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
  */
 
 /* Normal handling for the case of faulting in non-tiled buffers */
-static int fault_1d(struct drm_gem_object *obj,
+static vm_fault_t fault_1d(struct drm_gem_object *obj,
 		struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -392,11 +392,12 @@ static int fault_1d(struct drm_gem_object *obj,
 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
-	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+	return vmf_insert_mixed(vma, vmf->address,
+			__pfn_to_pfn_t(pfn, PFN_DEV));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
-static int fault_2d(struct drm_gem_object *obj,
+static vm_fault_t fault_2d(struct drm_gem_object *obj,
 		struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -407,7 +408,8 @@ static int fault_2d(struct drm_gem_object *obj,
 	unsigned long pfn;
 	pgoff_t pgoff, base_pgoff;
 	unsigned long vaddr;
-	int i, ret, slots;
+	int i, err, slots;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
 
 	/*
 	 * Note the height of the slot is also equal to the number of pages
@@ -473,9 +475,10 @@ static int fault_2d(struct drm_gem_object *obj,
 		memset(pages + slots, 0,
 				sizeof(struct page *) * (n - slots));
 
-	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
-	if (ret) {
-		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+	if (err) {
+		ret = vmf_error(err);
+		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
 		return ret;
 	}
 
@@ -485,7 +488,10 @@ static int fault_2d(struct drm_gem_object *obj,
 			pfn, pfn << PAGE_SHIFT);
 
 	for (i = n; i > 0; i--) {
-		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+		ret = vmf_insert_mixed(vma,
+			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+		if (ret & VM_FAULT_ERROR)
+			break;
 		pfn += priv->usergart[fmt].stride_pfn;
 		vaddr += PAGE_SIZE * m;
 	}
@@ -494,7 +500,7 @@ static int fault_2d(struct drm_gem_object *obj,
 	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
 				% NUM_USERGART_ENTRIES;
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -509,14 +515,15 @@ static int fault_2d(struct drm_gem_object *obj,
  * vma->vm_private_data points to the GEM object that is backing this
  * mapping.
  */
-int omap_gem_fault(struct vm_fault *vmf)
+vm_fault_t omap_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	struct drm_device *dev = obj->dev;
 	struct page **pages;
-	int ret;
+	int err;
+	vm_fault_t ret;
 
 	/* Make sure we don't parallel update on a fault, nor move or remove
 	 * something from beneath our feet
@@ -524,9 +531,11 @@ int omap_gem_fault(struct vm_fault *vmf)
 	mutex_lock(&dev->struct_mutex);
 
 	/* if a shmem backed object, make sure we have pages attached now */
-	ret = get_pages(obj, &pages);
-	if (ret)
+	err = get_pages(obj, &pages);
+	if (err) {
+		ret = vmf_error(err);
 		goto fail;
+	}
 
 	/* where should we do corresponding put_pages().. we are mapping
 	 * the original page, rather than thru a GART, so we can't rely
@@ -542,21 +551,7 @@ int omap_gem_fault(struct vm_fault *vmf)
 
 fail:
 	mutex_unlock(&dev->struct_mutex);
-	switch (ret) {
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		/*
-		 * EBUSY is ok: this just means that another thread
-		 * already did the job.
-		 */
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	default:
-		return VM_FAULT_SIGBUS;
-	}
+	return ret;
 }
 
 /** We override mainly to fix up some of the vm mapping flags.. */
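
For context, a handler with the vm_fault_t signature introduced above is hooked up through the driver's vm_operations_struct. The sketch below uses hypothetical example_* names (it is not part of this commit and not the omapdrm code) to show the post-conversion shape: setup failures, still plain errno ints, go through vmf_error(), while the page-table insert already yields a vm_fault_t and is returned directly.

#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <drm/drm_gem.h>

/* Hypothetical driver helper (assumed, not from omap_gem.c): pins the
 * backing storage for the faulting object and reports its base pfn. */
static int example_pin_pages(struct drm_gem_object *obj, unsigned long *pfn);

/* Minimal .fault handler in the post-conversion style. */
static vm_fault_t example_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	unsigned long pfn;
	int err;

	err = example_pin_pages(obj, &pfn);
	if (err)
		return vmf_error(err);	/* errno -> VM_FAULT_OOM/SIGBUS */

	/* vmf_insert_mixed() already returns a vm_fault_t, so its result
	 * needs no further translation. */
	return vmf_insert_mixed(vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
}

static const struct vm_operations_struct example_gem_vm_ops = {
	.fault = example_gem_fault,
};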