-rw-r--r--   drivers/gpu/drm/radeon/radeon.h         |   3
-rw-r--r--   drivers/gpu/drm/radeon/radeon_display.c | 181
2 files changed, 93 insertions, 91 deletions
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 29d9cc04c04e..b7204500a9a6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -684,10 +684,9 @@ struct radeon_flip_work {
         struct work_struct unpin_work;
         struct radeon_device *rdev;
         int crtc_id;
-        struct drm_framebuffer *fb;
+        uint64_t base;
         struct drm_pending_vblank_event *event;
         struct radeon_bo *old_rbo;
-        struct radeon_bo *new_rbo;
         struct radeon_fence *fence;
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 65501af453be..8de579473645 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -386,11 +386,6 @@ static void radeon_flip_work_func(struct work_struct *__work)
         struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
         struct drm_crtc *crtc = &radeon_crtc->base;
-        struct drm_framebuffer *fb = work->fb;
-
-        uint32_t tiling_flags, pitch_pixels;
-        uint64_t base;
-
         unsigned long flags;
         int r;
 
@@ -411,26 +406,94 @@ static void radeon_flip_work_func(struct work_struct *__work)
                 radeon_fence_unref(&work->fence);
         }
 
+        /* do the flip (mmio) */
+        radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+        /* We borrow the event spin lock for protecting flip_status */
+        spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+        /* set the proper interrupt */
+        radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+        radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+        up_read(&rdev->exclusive_lock);
+
+        return;
+
+cleanup:
+        drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+        radeon_fence_unref(&work->fence);
+        kfree(work);
+        up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_pending_vblank_event *event,
+                                 uint32_t page_flip_flags)
+{
+        struct drm_device *dev = crtc->dev;
+        struct radeon_device *rdev = dev->dev_private;
+        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+        struct radeon_framebuffer *old_radeon_fb;
+        struct radeon_framebuffer *new_radeon_fb;
+        struct drm_gem_object *obj;
+        struct radeon_flip_work *work;
+        struct radeon_bo *new_rbo;
+        uint32_t tiling_flags, pitch_pixels;
+        uint64_t base;
+        unsigned long flags;
+        int r;
+
+        work = kzalloc(sizeof *work, GFP_KERNEL);
+        if (work == NULL)
+                return -ENOMEM;
+
+        INIT_WORK(&work->flip_work, radeon_flip_work_func);
+        INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+        work->rdev = rdev;
+        work->crtc_id = radeon_crtc->crtc_id;
+        work->event = event;
+
+        /* schedule unpin of the old buffer */
+        old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+        obj = old_radeon_fb->obj;
+
+        /* take a reference to the old object */
+        drm_gem_object_reference(obj);
+        work->old_rbo = gem_to_radeon_bo(obj);
+
+        new_radeon_fb = to_radeon_framebuffer(fb);
+        obj = new_radeon_fb->obj;
+        new_rbo = gem_to_radeon_bo(obj);
+
+        spin_lock(&new_rbo->tbo.bdev->fence_lock);
+        if (new_rbo->tbo.sync_obj)
+                work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+        spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
         /* pin the new buffer */
-        DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
-                         work->old_rbo, work->new_rbo);
+        DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+                         work->old_rbo, new_rbo);
 
-        r = radeon_bo_reserve(work->new_rbo, false);
+        r = radeon_bo_reserve(new_rbo, false);
         if (unlikely(r != 0)) {
                 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
                 goto cleanup;
         }
         /* Only 27 bit offset for legacy CRTC */
-        r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+        r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
         if (unlikely(r != 0)) {
-                radeon_bo_unreserve(work->new_rbo);
+                radeon_bo_unreserve(new_rbo);
                 r = -EINVAL;
                 DRM_ERROR("failed to pin new rbo buffer before flip\n");
                 goto cleanup;
         }
-        radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
-        radeon_bo_unreserve(work->new_rbo);
+        radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+        radeon_bo_unreserve(new_rbo);
 
         if (!ASIC_IS_AVIVO(rdev)) {
                 /* crtc offset is from display base addr not FB location */
@@ -467,6 +530,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
                 }
                 base &= ~7;
         }
+        work->base = base;
 
         r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
         if (r) {
@@ -477,100 +541,39 @@ static void radeon_flip_work_func(struct work_struct *__work)
         /* We borrow the event spin lock for protecting flip_work */
         spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-        /* set the proper interrupt */
-        radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+        if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+                r = -EBUSY;
+                goto pflip_cleanup;
+        }
+        radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+        radeon_crtc->flip_work = work;
 
-        /* do the flip (mmio) */
-        radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+        /* update crtc fb */
+        crtc->primary->fb = fb;
 
-        radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
         spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-        up_read(&rdev->exclusive_lock);
 
-        return;
+        queue_work(radeon_crtc->flip_queue, &work->flip_work);
+        return 0;
 
 pflip_cleanup:
-        if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
+        if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
                 DRM_ERROR("failed to reserve new rbo in error path\n");
                 goto cleanup;
         }
-        if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
+        if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
                 DRM_ERROR("failed to unpin new rbo in error path\n");
         }
-        radeon_bo_unreserve(work->new_rbo);
+        radeon_bo_unreserve(new_rbo);
 
 cleanup:
         drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
         radeon_fence_unref(&work->fence);
         kfree(work);
-        up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-                                 struct drm_framebuffer *fb,
-                                 struct drm_pending_vblank_event *event,
-                                 uint32_t page_flip_flags)
-{
-        struct drm_device *dev = crtc->dev;
-        struct radeon_device *rdev = dev->dev_private;
-        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-        struct radeon_framebuffer *old_radeon_fb;
-        struct radeon_framebuffer *new_radeon_fb;
-        struct drm_gem_object *obj;
-        struct radeon_flip_work *work;
-        unsigned long flags;
-
-        work = kzalloc(sizeof *work, GFP_KERNEL);
-        if (work == NULL)
-                return -ENOMEM;
-
-        INIT_WORK(&work->flip_work, radeon_flip_work_func);
-        INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
 
-        work->rdev = rdev;
-        work->crtc_id = radeon_crtc->crtc_id;
-        work->fb = fb;
-        work->event = event;
-
-        /* schedule unpin of the old buffer */
-        old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-        obj = old_radeon_fb->obj;
-
-        /* take a reference to the old object */
-        drm_gem_object_reference(obj);
-        work->old_rbo = gem_to_radeon_bo(obj);
-
-        new_radeon_fb = to_radeon_framebuffer(fb);
-        obj = new_radeon_fb->obj;
-        work->new_rbo = gem_to_radeon_bo(obj);
-
-        spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
-        if (work->new_rbo->tbo.sync_obj)
-                work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
-        spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
-
-        /* We borrow the event spin lock for protecting flip_work */
-        spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
-        if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
-                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-                drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-                radeon_fence_unref(&work->fence);
-                kfree(work);
-                return -EBUSY;
-        }
-        radeon_crtc->flip_status = RADEON_FLIP_PENDING;
-        radeon_crtc->flip_work = work;
-
-        /* update crtc fb */
-        crtc->primary->fb = fb;
-
-        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
-        queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
-        return 0;
+        return r;
 }
 
 static int