 drivers/gpu/drm/radeon/atombios_crtc.c     |   8
 drivers/gpu/drm/radeon/atombios_encoders.c |  10
 drivers/gpu/drm/radeon/evergreen.c         |   5
 drivers/gpu/drm/radeon/evergreen_reg.h     |   1
 drivers/gpu/drm/radeon/radeon.h            |   3
 drivers/gpu/drm/radeon/radeon_display.c    | 198
 drivers/gpu/drm/radeon/rv515.c             |   5
 7 files changed, 119 insertions, 111 deletions
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c73411a56..30d242b25078 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
 	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-	/* set pageflip to happen anywhere in vblank interval */
-	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+	/* set pageflip to happen only at start of vblank interval (front porch) */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
 	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-	/* set pageflip to happen anywhere in vblank interval */
-	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+	/* set pageflip to happen only at start of vblank interval (front porch) */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
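Note on the two hunks above: the set_base path first clears the "update during horizontal retrace" bit in the flip-control register and then programs MASTER_UPDATE_MODE; the patch changes that second write from 0 (the new surface address may be latched anywhere in the vblank interval) to 3 (it is latched only at the start of vblank, i.e. the front porch), so the completed flip and the pflip interrupt agree on when the new buffer becomes visible. A minimal sketch of that pair of writes, with RREG32/WREG32 mocked over an array; the FLIP_CONTROL offset and bit below are illustrative stand-ins, only the MASTER_UPDATE_MODE offset comes from evergreen_reg.h:

/* Sketch only: the register programming this hunk changes, with MMIO mocked. */
#include <stdint.h>
#include <stdio.h>

#define GRPH_FLIP_CONTROL                0x6148   /* illustrative offset */
#define GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 2) /* illustrative bit */
#define MASTER_UPDATE_MODE               0x6ef8   /* as in evergreen_reg.h */

static uint32_t mmio[0x10000 / 4];                       /* fake register file */
static uint32_t RREG32(uint32_t reg) { return mmio[reg / 4]; }
static void WREG32(uint32_t reg, uint32_t val) { mmio[reg / 4] = val; }

static void program_flip_point(uint32_t crtc_offset)
{
	uint32_t tmp = RREG32(GRPH_FLIP_CONTROL + crtc_offset);

	/* never latch the new address during horizontal retrace */
	tmp &= ~GRPH_SURFACE_UPDATE_H_RETRACE_EN;
	WREG32(GRPH_FLIP_CONTROL + crtc_offset, tmp);

	/* 0 = latch anywhere in vblank, 3 = latch only at start of vblank */
	WREG32(MASTER_UPDATE_MODE + crtc_offset, 3);
}

int main(void)
{
	program_flip_point(0);
	printf("MASTER_UPDATE_MODE = %u\n", (unsigned)RREG32(MASTER_UPDATE_MODE));
	return 0;
}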
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b2908440644..7d68203a3737 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 	struct backlight_properties props;
 	struct radeon_backlight_privdata *pdata;
 	struct radeon_encoder_atom_dig *dig;
-	u8 backlight_level;
 	char bl_name[16];
 
 	/* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 
 	pdata->encoder = radeon_encoder;
 
-	backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
 	dig = radeon_encoder->enc_priv;
 	dig->bl_dev = bd;
 
 	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+	/* Set a reasonable default here if the level is 0 otherwise
+	 * fbdev will attempt to turn the backlight on after console
+	 * unblanking and it will try and restore 0 which turns the backlight
+	 * off again.
+	 */
+	if (bd->props.brightness == 0)
+		bd->props.brightness = RADEON_MAX_BL_LEVEL;
 	bd->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(bd);
 
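Note on the hunk above: besides dropping the unused backlight_level local, it refuses to register the backlight device with a brightness of 0, because fbdev's console unblank restores the stored property value and a stored 0 would immediately turn the panel off again. A small standalone sketch of that clamp; the backlight device and the fbdev restore path are mocked, only RADEON_MAX_BL_LEVEL (255 in the driver) is taken from the source:

/* Sketch only: why a registered brightness of 0 is replaced with the maximum. */
#include <stdio.h>

#define RADEON_MAX_BL_LEVEL 255   /* value used by the radeon driver */

struct backlight_props {
	int brightness;   /* what fbdev will restore on console unblank */
	int power;
};

static void register_backlight(struct backlight_props *props, int hw_level)
{
	props->brightness = hw_level;
	/* If the hardware reports 0, fbdev's unblank would "restore" 0 and
	 * turn the backlight straight off again, so pick a sane default. */
	if (props->brightness == 0)
		props->brightness = RADEON_MAX_BL_LEVEL;
	props->power = 0;  /* FB_BLANK_UNBLANK */
}

int main(void)
{
	struct backlight_props props;

	register_backlight(&props, 0);
	printf("hw says 0   -> registered brightness %d\n", props.brightness);

	register_backlight(&props, 128);
	printf("hw says 128 -> registered brightness %d\n", props.brightness);
	return 0;
}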
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f7ece0ff431b..250bac3935a4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x3) != 0) {
-				tmp &= ~0x3;
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
 				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
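Note on the hunk above: the MC resume path used to only make sure the low two bits of MASTER_UPDATE_MODE were clear; with the flip point moved to the start of vblank it now forces the low three bits to 3 before re-enabling the CRTCs, and the same check is applied to the AVIVO register in rv515.c further down. A standalone sketch of that read-modify-write, with the register access mocked and the test values purely illustrative:

/* Sketch only: force the low 3 bits of an update-mode value to 3, but only
 * rewrite the register when they are not already 3. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool fixup_update_mode(uint32_t *reg)
{
	uint32_t tmp = *reg;                 /* RREG32(...) in the driver */

	if ((tmp & 0x7) != 3) {
		tmp &= ~0x7;
		tmp |= 0x3;
		*reg = tmp;                  /* WREG32(...) in the driver */
		return true;                 /* register was rewritten */
	}
	return false;                        /* already programmed correctly */
}

int main(void)
{
	uint32_t regs[] = { 0x0, 0x3, 0x4, 0x7 };

	for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		uint32_t before = regs[i];
		bool wrote = fixup_update_mode(&regs[i]);

		printf("0x%x -> 0x%x (%s)\n", (unsigned)before, (unsigned)regs[i],
		       wrote ? "rewritten" : "left alone");
	}
	return 0;
}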
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143fca2c..23bff590fb6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
 # define EVERGREEN_CRTC_V_BLANK                 (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION          0x6e90
 #define EVERGREEN_CRTC_STATUS_HV_COUNT          0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE            0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK              0x6ed4
 #define EVERGREEN_MASTER_UPDATE_LOCK            0x6ef4
 #define EVERGREEN_MASTER_UPDATE_MODE            0x6ef8
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 29d9cc04c04e..b7204500a9a6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -684,10 +684,9 @@ struct radeon_flip_work {
 	struct work_struct		unpin_work;
 	struct radeon_device		*rdev;
 	int				crtc_id;
-	struct drm_framebuffer		*fb;
+	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct radeon_bo		*old_rbo;
-	struct radeon_bo		*new_rbo;
 	struct radeon_fence		*fence;
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 13896edcf0b6..bf25061c8ac4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -366,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
 	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
-	radeon_fence_unref(&work->fence);
 	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
 	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
 }
@@ -386,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &radeon_crtc->base;
-	struct drm_framebuffer *fb = work->fb;
-
-	uint32_t tiling_flags, pitch_pixels;
-	uint64_t base;
-
 	unsigned long flags;
 	int r;
 
 	down_read(&rdev->exclusive_lock);
-	while (work->fence) {
+	if (work->fence) {
 		r = radeon_fence_wait(work->fence, false);
 		if (r == -EDEADLK) {
 			up_read(&rdev->exclusive_lock);
 			r = radeon_gpu_reset(rdev);
 			down_read(&rdev->exclusive_lock);
 		}
+		if (r)
+			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
 
-		if (r) {
-			DRM_ERROR("failed to wait on page flip fence (%d)!\n",
-				  r);
-			goto cleanup;
-		} else
-			radeon_fence_unref(&work->fence);
+		/* We continue with the page flip even if we failed to wait on
+		 * the fence, otherwise the DRM core and userspace will be
+		 * confused about which BO the CRTC is scanning out
+		 */
+
+		radeon_fence_unref(&work->fence);
 	}
 
+	/* We borrow the event spin lock for protecting flip_status */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	/* set the proper interrupt */
+	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+	/* do the flip (mmio) */
+	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_framebuffer *old_radeon_fb;
+	struct radeon_framebuffer *new_radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_flip_work *work;
+	struct radeon_bo *new_rbo;
+	uint32_t tiling_flags, pitch_pixels;
+	uint64_t base;
+	unsigned long flags;
+	int r;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&work->flip_work, radeon_flip_work_func);
+	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+	work->rdev = rdev;
+	work->crtc_id = radeon_crtc->crtc_id;
+	work->event = event;
+
+	/* schedule unpin of the old buffer */
+	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+	obj = old_radeon_fb->obj;
+
+	/* take a reference to the old object */
+	drm_gem_object_reference(obj);
+	work->old_rbo = gem_to_radeon_bo(obj);
+
+	new_radeon_fb = to_radeon_framebuffer(fb);
+	obj = new_radeon_fb->obj;
+	new_rbo = gem_to_radeon_bo(obj);
+
+	spin_lock(&new_rbo->tbo.bdev->fence_lock);
+	if (new_rbo->tbo.sync_obj)
+		work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+	spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
 	/* pin the new buffer */
-	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
-			 work->old_rbo, work->new_rbo);
+	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+			 work->old_rbo, new_rbo);
 
-	r = radeon_bo_reserve(work->new_rbo, false);
+	r = radeon_bo_reserve(new_rbo, false);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
 		goto cleanup;
 	}
 	/* Only 27 bit offset for legacy CRTC */
-	r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
 				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
 	if (unlikely(r != 0)) {
-		radeon_bo_unreserve(work->new_rbo);
+		radeon_bo_unreserve(new_rbo);
 		r = -EINVAL;
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
-	radeon_bo_unreserve(work->new_rbo);
+	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(new_rbo);
 
 	if (!ASIC_IS_AVIVO(rdev)) {
 		/* crtc offset is from display base addr not FB location */
@@ -467,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 		}
 		base &= ~7;
 	}
+	work->base = base;
 
 	r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
 	if (r) {
@@ -477,100 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_work */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	/* set the proper interrupt */
-	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		r = -EBUSY;
+		goto vblank_cleanup;
+	}
+	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+	radeon_crtc->flip_work = work;
 
-	/* do the flip (mmio) */
-	radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+	/* update crtc fb */
+	crtc->primary->fb = fb;
 
-	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	up_read(&rdev->exclusive_lock);
 
-	return;
+	queue_work(radeon_crtc->flip_queue, &work->flip_work);
+	return 0;
+
+vblank_cleanup:
+	drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
 
 pflip_cleanup:
-	if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
+	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
 		DRM_ERROR("failed to reserve new rbo in error path\n");
 		goto cleanup;
 	}
-	if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
+	if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
 		DRM_ERROR("failed to unpin new rbo in error path\n");
 	}
-	radeon_bo_unreserve(work->new_rbo);
+	radeon_bo_unreserve(new_rbo);
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
 	radeon_fence_unref(&work->fence);
 	kfree(work);
-	up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags)
-{
-	struct drm_device *dev = crtc->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct radeon_framebuffer *old_radeon_fb;
-	struct radeon_framebuffer *new_radeon_fb;
-	struct drm_gem_object *obj;
-	struct radeon_flip_work *work;
-	unsigned long flags;
-
-	work = kzalloc(sizeof *work, GFP_KERNEL);
-	if (work == NULL)
-		return -ENOMEM;
-
-	INIT_WORK(&work->flip_work, radeon_flip_work_func);
-	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
-	work->rdev = rdev;
-	work->crtc_id = radeon_crtc->crtc_id;
-	work->fb = fb;
-	work->event = event;
-
-	/* schedule unpin of the old buffer */
-	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-	obj = old_radeon_fb->obj;
-
-	/* take a reference to the old object */
-	drm_gem_object_reference(obj);
-	work->old_rbo = gem_to_radeon_bo(obj);
-
-	new_radeon_fb = to_radeon_framebuffer(fb);
-	obj = new_radeon_fb->obj;
-	work->new_rbo = gem_to_radeon_bo(obj);
-
-	spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
-	if (work->new_rbo->tbo.sync_obj)
-		work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
-	spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
-
-	/* We borrow the event spin lock for protecting flip_work */
-	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
-		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-		drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-		radeon_fence_unref(&work->fence);
-		kfree(work);
-		return -EBUSY;
-	}
-	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
-	radeon_crtc->flip_work = work;
-
-	/* update crtc fb */
-	crtc->primary->fb = fb;
-
-	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
-	queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
-	return 0;
+	return r;
 }
 
 static int
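Note on the restructuring above: the work that can fail now happens in radeon_crtc_page_flip() itself (pin the new BO, compute the scanout address, take the vblank reference, mark the CRTC RADEON_FLIP_PENDING), while radeon_flip_work_func() only waits for the render fence and performs the MMIO flip, carrying nothing but the precomputed work->base. A condensed, self-contained sketch of that division of labour; every structure and call below is a stub standing in for the driver's real API, only the control flow mirrors the patch:

/* Sketch only: the ioctl prepares and validates, the worker waits and flips. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

enum flip_status { FLIP_NONE, FLIP_PENDING, FLIP_SUBMITTED };

struct flip_work {
	uint64_t base;        /* scanout address, computed in the ioctl */
	bool     fence;       /* "rendering not finished yet" stand-in */
};

struct crtc {
	enum flip_status status;
	struct flip_work *work;
	uint64_t scanout_base;
};

/* ioctl path: everything that can fail happens here */
static int crtc_page_flip(struct crtc *crtc, uint64_t pinned_base, bool busy_gpu)
{
	static struct flip_work work;          /* kzalloc'ed in the driver */

	if (crtc->status != FLIP_NONE)
		return -16;                    /* -EBUSY: one flip in flight */

	work.base = pinned_base;               /* BO already pinned, address known */
	work.fence = busy_gpu;
	crtc->status = FLIP_PENDING;
	crtc->work = &work;
	/* queue_work(flip_queue, ...) would run the function below */
	return 0;
}

/* worker: only wait for rendering, then do the (mocked) MMIO flip */
static void flip_work_func(struct crtc *crtc)
{
	struct flip_work *work = crtc->work;

	if (work->fence)
		work->fence = false;           /* radeon_fence_wait() stand-in */

	crtc->scanout_base = work->base;       /* radeon_page_flip() stand-in */
	crtc->status = FLIP_SUBMITTED;
}

int main(void)
{
	struct crtc crtc = { .status = FLIP_NONE };

	if (crtc_page_flip(&crtc, 0x100000, true) == 0)
		flip_work_func(&crtc);
	printf("scanout base now 0x%llx, status %d\n",
	       (unsigned long long)crtc.scanout_base, crtc.status);
	return 0;
}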
@@ -830,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 	struct radeon_device *rdev = dev->dev_private;
 	int ret = 0;
 
+	/* don't leak the edid if we already fetched it in detect() */
+	if (radeon_connector->edid)
+		goto got_edid;
+
 	/* on hw with routers, select right port */
 	if (radeon_connector->router.ddc_valid)
 		radeon_router_select_ddc_port(radeon_connector);
@@ -868,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 		radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
 	}
 	if (radeon_connector->edid) {
+got_edid:
 		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
 		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
 		drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
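Note on the two hunks above: they plug an EDID memory leak. When detect() has already cached radeon_connector->edid, radeon_ddc_get_modes() used to fetch a second copy over DDC and overwrite the pointer; now it jumps straight to the code that consumes the cached EDID. A tiny sketch of that guard, with the EDID fetch and consumers stubbed out:

/* Sketch only: skip re-fetching an EDID that was already cached, so the
 * earlier allocation is not overwritten and leaked. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct connector {
	char *edid;   /* stands in for the kmalloc'ed EDID blob */
};

static char *fetch_edid_over_ddc(void)
{
	return strdup("edid-from-ddc");     /* pretend DDC transfer */
}

static int get_modes(struct connector *c)
{
	if (c->edid)
		goto got_edid;              /* don't leak the cached copy */

	c->edid = fetch_edid_over_ddc();
	if (!c->edid)
		return 0;

got_edid:
	printf("adding modes from %s\n", c->edid);  /* drm_add_edid_modes() stand-in */
	return 1;
}

int main(void)
{
	struct connector c = { .edid = strdup("edid-from-detect") };

	get_modes(&c);       /* uses the cached EDID, no second allocation */
	free(c.edid);
	return 0;
}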
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29d9f1c..3e21e869015f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x3) != 0) {
-				tmp &= ~0x3;
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
 				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
