author		Dave Airlie <airlied@redhat.com>	2010-11-25 19:45:03 -0500
committer	Dave Airlie <airlied@redhat.com>	2010-11-25 19:45:03 -0500
commit		e536fb6f9dc3908ad4c642414002ec9daf590ed7 (patch)
tree		5c7e1b14654ac246bbbc3be15e27b609cfef4c13 /drivers/gpu
parent		a235e4c9302509ac5956bbbffa22eb5ed9fcdc54 (diff)
parent		ba84cd1f2b5dd49bda9300c5a11373f7e14c3c66 (diff)
Merge remote branch 'intel/drm-intel-fixes' of /ssd/git/drm-next into drm-fixes

* 'intel/drm-intel-fixes' of /ssd/git/drm-next:
  drm/i915/sdvo: Always add a 30ms delay to make SDVO TV detection reliable
  MAINTAINERS: INTEL DRM DRIVERS list (intel-gfx) is subscribers-only
  drm/i915/sdvo: Always fallback to querying the shared DDC line
  drm/i915: Handle pagefaults in execbuffer user relocations
  drm/i915/sdvo: Only enable HDMI encodings only if the commandset is supported
  drm/i915: Only save/restore cursor regs if !KMS
  drm/i915: Prevent integer overflow when validating the execbuffer
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      470
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c   40
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c     84
3 files changed, 346 insertions(+), 248 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 17b1cba3b5f..92b097dbe4f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3254,192 +3254,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	return 0;
 }
 
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
 static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
-			     struct drm_file *file_priv,
-			     struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+				   struct drm_file *file_priv,
+				   struct drm_i915_gem_exec_object2 *entry,
+				   struct drm_i915_gem_relocation_entry *reloc)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_gem_object *target_obj = NULL;
-	uint32_t target_handle = 0;
-	int i, ret = 0;
+	struct drm_gem_object *target_obj;
+	uint32_t target_offset;
+	int ret = -EINVAL;
 
-	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry reloc;
-		uint32_t target_offset;
-
-		if (__copy_from_user_inatomic(&reloc,
-					      user_relocs+i,
-					      sizeof(reloc))) {
-			ret = -EFAULT;
-			break;
-		}
-
-		if (reloc.target_handle != target_handle) {
-			drm_gem_object_unreference(target_obj);
-
-			target_obj = drm_gem_object_lookup(dev, file_priv,
-							   reloc.target_handle);
-			if (target_obj == NULL) {
-				ret = -ENOENT;
-				break;
-			}
-
-			target_handle = reloc.target_handle;
-		}
-		target_offset = to_intel_bo(target_obj)->gtt_offset;
+	target_obj = drm_gem_object_lookup(dev, file_priv,
+					   reloc->target_handle);
+	if (target_obj == NULL)
+		return -ENOENT;
+
+	target_offset = to_intel_bo(target_obj)->gtt_offset;
 
 #if WATCH_RELOC
-		DRM_INFO("%s: obj %p offset %08x target %d "
-			 "read %08x write %08x gtt %08x "
-			 "presumed %08x delta %08x\n",
-			 __func__,
-			 obj,
-			 (int) reloc.offset,
-			 (int) reloc.target_handle,
-			 (int) reloc.read_domains,
-			 (int) reloc.write_domain,
-			 (int) target_offset,
-			 (int) reloc.presumed_offset,
-			 reloc.delta);
+	DRM_INFO("%s: obj %p offset %08x target %d "
+		 "read %08x write %08x gtt %08x "
+		 "presumed %08x delta %08x\n",
+		 __func__,
+		 obj,
+		 (int) reloc->offset,
+		 (int) reloc->target_handle,
+		 (int) reloc->read_domains,
+		 (int) reloc->write_domain,
+		 (int) target_offset,
+		 (int) reloc->presumed_offset,
+		 reloc->delta);
 #endif
 
-		/* The target buffer should have appeared before us in the
-		 * exec_object list, so it should have a GTT space bound by now.
-		 */
-		if (target_offset == 0) {
-			DRM_ERROR("No GTT space found for object %d\n",
-				  reloc.target_handle);
-			ret = -EINVAL;
-			break;
-		}
-
-		/* Validate that the target is in a valid r/w GPU domain */
-		if (reloc.write_domain & (reloc.write_domain - 1)) {
-			DRM_ERROR("reloc with multiple write domains: "
-				  "obj %p target %d offset %d "
-				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
-			DRM_ERROR("reloc with read/write CPU domains: "
-				  "obj %p target %d offset %d "
-				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.write_domain && target_obj->pending_write_domain &&
-		    reloc.write_domain != target_obj->pending_write_domain) {
-			DRM_ERROR("Write domain conflict: "
-				  "obj %p target %d offset %d "
-				  "new %08x old %08x\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.write_domain,
-				  target_obj->pending_write_domain);
-			ret = -EINVAL;
-			break;
-		}
-
-		target_obj->pending_read_domains |= reloc.read_domains;
-		target_obj->pending_write_domain |= reloc.write_domain;
-
-		/* If the relocation already has the right value in it, no
-		 * more work needs to be done.
-		 */
-		if (target_offset == reloc.presumed_offset)
-			continue;
-
-		/* Check that the relocation address is valid... */
-		if (reloc.offset > obj->base.size - 4) {
-			DRM_ERROR("Relocation beyond object bounds: "
-				  "obj %p target %d offset %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset, (int) obj->base.size);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.offset & 3) {
-			DRM_ERROR("Relocation not 4-byte aligned: "
-				  "obj %p target %d offset %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset);
-			ret = -EINVAL;
-			break;
-		}
-
-		/* and points to somewhere within the target object. */
-		if (reloc.delta >= target_obj->size) {
-			DRM_ERROR("Relocation beyond target object bounds: "
-				  "obj %p target %d delta %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.delta, (int) target_obj->size);
-			ret = -EINVAL;
-			break;
-		}
-
-		reloc.delta += target_offset;
-		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
-			char *vaddr;
-
-			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
-			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
-			kunmap_atomic(vaddr);
-		} else {
-			uint32_t __iomem *reloc_entry;
-			void __iomem *reloc_page;
-
-			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-			if (ret)
-				break;
-
-			/* Map the page containing the relocation we're going to perform. */
-			reloc.offset += obj->gtt_offset;
-			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-							      reloc.offset & PAGE_MASK);
-			reloc_entry = (uint32_t __iomem *)
-				(reloc_page + (reloc.offset & ~PAGE_MASK));
-			iowrite32(reloc.delta, reloc_entry);
-			io_mapping_unmap_atomic(reloc_page);
-		}
-
-		/* and update the user's relocation entry */
-		reloc.presumed_offset = target_offset;
-		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
-					    &reloc.presumed_offset,
-					    sizeof(reloc.presumed_offset))) {
-			ret = -EFAULT;
-			break;
-		}
-	}
+	/* The target buffer should have appeared before us in the
+	 * exec_object list, so it should have a GTT space bound by now.
+	 */
+	if (target_offset == 0) {
+		DRM_ERROR("No GTT space found for object %d\n",
+			  reloc->target_handle);
+		goto err;
+	}
+
+	/* Validate that the target is in a valid r/w GPU domain */
+	if (reloc->write_domain & (reloc->write_domain - 1)) {
+		DRM_ERROR("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
+	}
+	if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+	    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+		DRM_ERROR("reloc with read/write CPU domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
+	}
+	if (reloc->write_domain && target_obj->pending_write_domain &&
+	    reloc->write_domain != target_obj->pending_write_domain) {
+		DRM_ERROR("Write domain conflict: "
+			  "obj %p target %d offset %d "
+			  "new %08x old %08x\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->write_domain,
+			  target_obj->pending_write_domain);
+		goto err;
+	}
+
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
+
+	/* If the relocation already has the right value in it, no
+	 * more work needs to be done.
+	 */
+	if (target_offset == reloc->presumed_offset)
+		goto out;
+
+	/* Check that the relocation address is valid... */
+	if (reloc->offset > obj->base.size - 4) {
+		DRM_ERROR("Relocation beyond object bounds: "
+			  "obj %p target %d offset %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  (int) obj->base.size);
+		goto err;
+	}
+	if (reloc->offset & 3) {
+		DRM_ERROR("Relocation not 4-byte aligned: "
+			  "obj %p target %d offset %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		goto err;
+	}
+
+	/* and points to somewhere within the target object. */
+	if (reloc->delta >= target_obj->size) {
+		DRM_ERROR("Relocation beyond target object bounds: "
+			  "obj %p target %d delta %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->delta,
+			  (int) target_obj->size);
+		goto err;
+	}
+
+	reloc->delta += target_offset;
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+		char *vaddr;
+
+		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+		kunmap_atomic(vaddr);
+	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		uint32_t __iomem *reloc_entry;
+		void __iomem *reloc_page;
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+		if (ret)
+			goto err;
+
+		/* Map the page containing the relocation we're going to perform. */
+		reloc->offset += obj->gtt_offset;
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						      reloc->offset & PAGE_MASK);
+		reloc_entry = (uint32_t __iomem *)
+			(reloc_page + (reloc->offset & ~PAGE_MASK));
+		iowrite32(reloc->delta, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
+	}
+
+	/* and update the user's relocation entry */
+	reloc->presumed_offset = target_offset;
 
+out:
+	ret = 0;
+err:
 	drm_gem_object_unreference(target_obj);
 	return ret;
 }
 
 static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
-			struct drm_file *file,
-			struct drm_gem_object **object_list,
-			struct drm_i915_gem_exec_object2 *exec_list,
-			int count)
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+				    struct drm_file *file_priv,
+				    struct drm_i915_gem_exec_object2 *entry)
+{
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	int i, ret;
+
+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry reloc;
+
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc)))
+			return -EFAULT;
+
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+		if (ret)
+			return ret;
+
+		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+					    &reloc.presumed_offset,
+					    sizeof(reloc.presumed_offset)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+					 struct drm_file *file_priv,
+					 struct drm_i915_gem_exec_object2 *entry,
+					 struct drm_i915_gem_relocation_entry *relocs)
+{
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+			     struct drm_file *file,
+			     struct drm_gem_object **object_list,
+			     struct drm_i915_gem_exec_object2 *exec_list,
+			     int count)
+{
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object(obj, file,
+							  &exec_list[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+			    struct drm_file *file,
+			    struct drm_gem_object **object_list,
+			    struct drm_i915_gem_exec_object2 *exec_list,
+			    int count)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i, retry;
@@ -3502,6 +3540,87 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 }
 
 static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_file *file,
+				  struct drm_gem_object **object_list,
+				  struct drm_i915_gem_exec_object2 *exec_list,
+				  int count)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	int i, total, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->in_execbuffer = false;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec_list[i].relocation_count;
+
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL) {
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec_list[i].relocation_count *
+				   sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		total += exec_list[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  count);
+	if (ret)
+		goto err;
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+							       &exec_list[i],
+							       reloc + total);
+		if (ret)
+			goto err;
+
+		total += exec_list[i].relocation_count;
+	}
+
+	/* Leave the user relocations as are, this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	return ret;
+}
+
+static int
 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
 				struct drm_file *file,
 				struct intel_ring_buffer *ring,
@@ -3630,8 +3749,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+		int length; /* limited by fault_in_pages_readable() */
 
+		/* First check for malicious input causing overflow */
+		if (exec[i].relocation_count >
+		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+			return -EINVAL;
+
+		length = exec[i].relocation_count *
+			sizeof(struct drm_i915_gem_relocation_entry);
 		if (!access_ok(VERIFY_READ, ptr, length))
 			return -EFAULT;
 
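The guard added above is the integer-overflow fix named in the merge: on a 32-bit multiplication, a huge relocation_count can make count * sizeof(entry) wrap to a small value, so the later access_ok() would approve a buffer far shorter than what is actually read. A stand-alone illustration of the arithmetic (the 32-byte entry size is assumed here for the example, not taken from the kernel headers):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Entry size assumed to be 32 bytes for illustration. */
	uint32_t size = 32;
	uint32_t count = 0x08000000;	/* attacker-supplied relocation_count */
	uint32_t length = count * size;	/* 0x100000000 wraps to 0 */

	printf("unchecked length = %u\n", length);

	/* The fix: reject the count before the multiplication can wrap. */
	if (count > INT_MAX / size) {
		puts("rejected: relocation_count would overflow length");
		return 1;
	}
	return 0;
}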
@@ -3774,18 +3900,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_pin(dev, file,
-				      object_list, exec_list,
-				      args->buffer_count);
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  args->buffer_count);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+	ret = i915_gem_execbuffer_relocate(dev, file,
+					   object_list, exec_list,
+					   args->buffer_count);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, file,
+								object_list,
+								exec_list,
+								args->buffer_count);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
 		if (ret)
 			goto err;
 	}
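The shape of the pagefault fix is easier to see outside the driver: the fast path relocates with the mutex held and page faults forbidden, and a -EFAULT from it triggers a slow path that drops the lock, copies the relocation entries with faulting allowed, retakes the lock, and replays them from the kernel-side copy. A minimal stand-alone sketch of that pattern follows; the helper names and the faults_allowed flag are hypothetical stand-ins, not the kernel's API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int faults_allowed;	/* stands in for "may we fault/sleep here?" */

/* Stand-in for __copy_from_user_inatomic(): refuses to take a fault. */
static int copy_entry_atomic(int *dst, const int *src)
{
	if (!faults_allowed)
		return -EFAULT;	/* pretend the user page was not resident */
	*dst = *src;
	return 0;
}

/* Fast path: mimics relocating with the lock held, faults forbidden. */
static int relocate_fast(int *scratch, const int *user, int n)
{
	for (int i = 0; i < n; i++) {
		int ret = copy_entry_atomic(&scratch[i], &user[i]);
		if (ret)
			return ret;
	}
	return 0;
}

/* Slow path: "drop the lock", copy with faults allowed, "retake" it,
 * then replay the relocations from the kernel-side copy. */
static int relocate_slow(int *scratch, const int *user, int n)
{
	faults_allowed = 1;			/* mutex_unlock() */
	memcpy(scratch, user, n * sizeof(*user));
	faults_allowed = 0;			/* mutex_lock() */
	return 0;
}

int main(void)
{
	int user[4] = { 1, 2, 3, 4 }, scratch[4] = { 0 };

	int ret = relocate_fast(scratch, user, 4);
	if (ret == -EFAULT)			/* the fallback in do_execbuffer */
		ret = relocate_slow(scratch, user, 4);

	printf("ret=%d scratch[0]=%d\n", ret, scratch[0]);
	return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}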
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 454c064f8ef..42729d25da5 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
+	/* Cursor state */
+	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+	dev_priv->saveCURABASE = I915_READ(CURABASE);
+	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+	if (IS_GEN2(dev))
+		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
 		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
 	I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
 
+	/* Cursor state */
+	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+	if (IS_GEN2(dev))
+		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
 	return;
 }
 
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
 	/* Don't save them in KMS mode */
 	i915_save_modeset_reg(dev);
 
-	/* Cursor state */
-	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
-	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
-	dev_priv->saveCURABASE = I915_READ(CURABASE);
-	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
-	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
-	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
-	if (IS_GEN2(dev))
-		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
 	/* CRT state */
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
 	/* Don't restore them in KMS mode */
 	i915_restore_modeset_reg(dev);
 
-	/* Cursor state */
-	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
-	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
-	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
-	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
-	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
-	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
-	if (IS_GEN2(dev))
-		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
 	/* CRT state */
 	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de158b76bcd..d97e6cb52d3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -107,7 +107,8 @@ struct intel_sdvo {
 	 * This is set if we treat the device as HDMI, instead of DVI.
 	 */
 	bool is_hdmi;
-	bool has_audio;
+	bool has_hdmi_monitor;
+	bool has_hdmi_audio;
 
 	/**
 	 * This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
 
-	if (intel_sdvo->is_hdmi &&
+	if (intel_sdvo->has_hdmi_monitor &&
 	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
 		return;
 
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	}
 	if (intel_crtc->pipe == 1)
 		sdvox |= SDVO_PIPE_B_SELECT;
-	if (intel_sdvo->has_audio)
+	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
 	return drm_get_edid(connector, &sdvo->ddc);
 }
 
-static struct drm_connector *
-intel_find_analog_connector(struct drm_device *dev)
-{
-	struct drm_connector *connector;
-	struct intel_sdvo *encoder;
-
-	list_for_each_entry(encoder,
-			    &dev->mode_config.encoder_list,
-			    base.base.head) {
-		if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
-			list_for_each_entry(connector,
-					    &dev->mode_config.connector_list,
-					    head) {
-				if (&encoder->base ==
-				    intel_attached_encoder(connector))
-					return connector;
-			}
-		}
-	}
-
-	return NULL;
-}
-
-static int
-intel_analog_is_connected(struct drm_device *dev)
-{
-	struct drm_connector *analog_connector;
-
-	analog_connector = intel_find_analog_connector(dev);
-	if (!analog_connector)
-		return false;
-
-	if (analog_connector->funcs->detect(analog_connector, false) ==
-	    connector_status_disconnected)
-		return false;
-
-	return true;
-}
-
 /* Mac mini hack -- use the same DDC as the analog connector */
 static struct edid *
 intel_sdvo_get_analog_edid(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
-	if (!intel_analog_is_connected(connector->dev))
-		return NULL;
-
-	return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+	return drm_get_edid(connector,
+			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
 }
 
 enum drm_connector_status
@@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 	/* DDC bus is shared, match EDID to connector type */
 	if (edid->input & DRM_EDID_INPUT_DIGITAL) {
 		status = connector_status_connected;
-		intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
-		intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+		if (intel_sdvo->is_hdmi) {
+			intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+			intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+		}
 	}
 	connector->display_info.raw_edid = NULL;
 	kfree(edid);
@@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 	if (status == connector_status_connected) {
 		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 		if (intel_sdvo_connector->force_audio)
-			intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+			intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
 	}
 
 	return status;
@@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	if (!intel_sdvo_write_cmd(intel_sdvo,
 				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
 		return connector_status_unknown;
-	if (intel_sdvo->is_tv) {
-		/* add 30ms delay when the output type is SDVO-TV */
+
+	/* add 30ms delay when the output type might be TV */
+	if (intel_sdvo->caps.output_flags &
+	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
 		mdelay(30);
-	}
+
 	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
 		return connector_status_unknown;
 
@@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 	edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		drm_mode_connector_update_edid_property(connector, edid);
-		drm_add_edid_modes(connector, edid);
+		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+			drm_mode_connector_update_edid_property(connector, edid);
+			drm_add_edid_modes(connector, edid);
+		}
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
 
 		intel_sdvo_connector->force_audio = val;
 
-		if (val > 0 && intel_sdvo->has_audio)
+		if (val > 0 && intel_sdvo->has_hdmi_audio)
 			return 0;
-		if (val < 0 && !intel_sdvo->has_audio)
+		if (val < 0 && !intel_sdvo->has_hdmi_audio)
 			return 0;
 
-		intel_sdvo->has_audio = val > 0;
+		intel_sdvo->has_hdmi_audio = val > 0;
 		goto done;
 	}
 
@@ -2070,6 +2036,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+
+		intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
 		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -2077,8 +2045,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
-	intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
-
 	return true;
 }
 