diff options
author | Dave Airlie <airlied@gmail.com> | 2013-01-10 16:47:25 -0500 |
---|---|---|
committer | Dave Airlie <airlied@gmail.com> | 2013-01-10 16:52:48 -0500 |
commit | 82ba789f48de669fd0bbc84c326f07571d078572 (patch) | |
tree | 8c99366d7e66497b2ee4cfa35b8eb91d253fc541 /drivers/gpu | |
parent | ecf02a607bd801e742d7bb35c6e40f7ca15edf03 (diff) | |
parent | 93927ca52a55c23e0a6a305e7e9082e8411ac9fa (diff) |
Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel
Daniel writes:
"Pretty much all just major fixes:
- 2 pieces of duct-tape for the ilk bug.
- Sprite regression fixes from Chris.
- OOPS fix for a div-by-zero from Chris, regression due to the modeset
rework in 3.7, now brought to light by a benign change in 3.8.
- Fix interrupted bo pinning, used to work around CS coherency issues on
i830/i845 (the kernel also newly has a w/a in 3.8, but pinning is more efficient
if possible)."
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/drm_mm.c | 45 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 25 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 33 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_lvds.c | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 25 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_sprite.c | 10 |
6 files changed, 80 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 2bf9670ba29b..2aa331499f81 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -221,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | |||
221 | 221 | ||
222 | BUG_ON(!hole_node->hole_follows || node->allocated); | 222 | BUG_ON(!hole_node->hole_follows || node->allocated); |
223 | 223 | ||
224 | if (mm->color_adjust) | ||
225 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); | ||
226 | |||
227 | if (adj_start < start) | 224 | if (adj_start < start) |
228 | adj_start = start; | 225 | adj_start = start; |
226 | if (adj_end > end) | ||
227 | adj_end = end; | ||
228 | |||
229 | if (mm->color_adjust) | ||
230 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); | ||
229 | 231 | ||
230 | if (alignment) { | 232 | if (alignment) { |
231 | unsigned tmp = adj_start % alignment; | 233 | unsigned tmp = adj_start % alignment; |
@@ -506,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm, | |||
506 | mm->scan_size = size; | 508 | mm->scan_size = size; |
507 | mm->scanned_blocks = 0; | 509 | mm->scanned_blocks = 0; |
508 | mm->scan_hit_start = 0; | 510 | mm->scan_hit_start = 0; |
509 | mm->scan_hit_size = 0; | 511 | mm->scan_hit_end = 0; |
510 | mm->scan_check_range = 0; | 512 | mm->scan_check_range = 0; |
511 | mm->prev_scanned_node = NULL; | 513 | mm->prev_scanned_node = NULL; |
512 | } | 514 | } |
@@ -533,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, | |||
533 | mm->scan_size = size; | 535 | mm->scan_size = size; |
534 | mm->scanned_blocks = 0; | 536 | mm->scanned_blocks = 0; |
535 | mm->scan_hit_start = 0; | 537 | mm->scan_hit_start = 0; |
536 | mm->scan_hit_size = 0; | 538 | mm->scan_hit_end = 0; |
537 | mm->scan_start = start; | 539 | mm->scan_start = start; |
538 | mm->scan_end = end; | 540 | mm->scan_end = end; |
539 | mm->scan_check_range = 1; | 541 | mm->scan_check_range = 1; |
@@ -552,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) | |||
552 | struct drm_mm *mm = node->mm; | 554 | struct drm_mm *mm = node->mm; |
553 | struct drm_mm_node *prev_node; | 555 | struct drm_mm_node *prev_node; |
554 | unsigned long hole_start, hole_end; | 556 | unsigned long hole_start, hole_end; |
555 | unsigned long adj_start; | 557 | unsigned long adj_start, adj_end; |
556 | unsigned long adj_end; | ||
557 | 558 | ||
558 | mm->scanned_blocks++; | 559 | mm->scanned_blocks++; |
559 | 560 | ||
@@ -570,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) | |||
570 | node->node_list.next = &mm->prev_scanned_node->node_list; | 571 | node->node_list.next = &mm->prev_scanned_node->node_list; |
571 | mm->prev_scanned_node = node; | 572 | mm->prev_scanned_node = node; |
572 | 573 | ||
573 | hole_start = drm_mm_hole_node_start(prev_node); | 574 | adj_start = hole_start = drm_mm_hole_node_start(prev_node); |
574 | hole_end = drm_mm_hole_node_end(prev_node); | 575 | adj_end = hole_end = drm_mm_hole_node_end(prev_node); |
575 | |||
576 | adj_start = hole_start; | ||
577 | adj_end = hole_end; | ||
578 | |||
579 | if (mm->color_adjust) | ||
580 | mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end); | ||
581 | 576 | ||
582 | if (mm->scan_check_range) { | 577 | if (mm->scan_check_range) { |
583 | if (adj_start < mm->scan_start) | 578 | if (adj_start < mm->scan_start) |
@@ -586,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) | |||
586 | adj_end = mm->scan_end; | 581 | adj_end = mm->scan_end; |
587 | } | 582 | } |
588 | 583 | ||
584 | if (mm->color_adjust) | ||
585 | mm->color_adjust(prev_node, mm->scan_color, | ||
586 | &adj_start, &adj_end); | ||
587 | |||
589 | if (check_free_hole(adj_start, adj_end, | 588 | if (check_free_hole(adj_start, adj_end, |
590 | mm->scan_size, mm->scan_alignment)) { | 589 | mm->scan_size, mm->scan_alignment)) { |
591 | mm->scan_hit_start = hole_start; | 590 | mm->scan_hit_start = hole_start; |
592 | mm->scan_hit_size = hole_end; | 591 | mm->scan_hit_end = hole_end; |
593 | |||
594 | return 1; | 592 | return 1; |
595 | } | 593 | } |
596 | 594 | ||
@@ -626,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node) | |||
626 | node_list); | 624 | node_list); |
627 | 625 | ||
628 | prev_node->hole_follows = node->scanned_preceeds_hole; | 626 | prev_node->hole_follows = node->scanned_preceeds_hole; |
629 | INIT_LIST_HEAD(&node->node_list); | ||
630 | list_add(&node->node_list, &prev_node->node_list); | 627 | list_add(&node->node_list, &prev_node->node_list); |
631 | 628 | ||
632 | /* Only need to check for containement because start&size for the | 629 | return (drm_mm_hole_node_end(node) > mm->scan_hit_start && |
633 | * complete resulting free block (not just the desired part) is | 630 | node->start < mm->scan_hit_end); |
634 | * stored. */ | ||
635 | if (node->start >= mm->scan_hit_start && | ||
636 | node->start + node->size | ||
637 | <= mm->scan_hit_start + mm->scan_hit_size) { | ||
638 | return 1; | ||
639 | } | ||
640 | |||
641 | return 0; | ||
642 | } | 631 | } |
643 | EXPORT_SYMBOL(drm_mm_scan_remove_block); | 632 | EXPORT_SYMBOL(drm_mm_scan_remove_block); |
644 | 633 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index da3c82e301b1..8febea6daa08 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) | |||
1717 | } | 1717 | } |
1718 | 1718 | ||
1719 | static long | 1719 | static long |
1720 | i915_gem_purge(struct drm_i915_private *dev_priv, long target) | 1720 | __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, |
1721 | bool purgeable_only) | ||
1721 | { | 1722 | { |
1722 | struct drm_i915_gem_object *obj, *next; | 1723 | struct drm_i915_gem_object *obj, *next; |
1723 | long count = 0; | 1724 | long count = 0; |
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target) | |||
1725 | list_for_each_entry_safe(obj, next, | 1726 | list_for_each_entry_safe(obj, next, |
1726 | &dev_priv->mm.unbound_list, | 1727 | &dev_priv->mm.unbound_list, |
1727 | gtt_list) { | 1728 | gtt_list) { |
1728 | if (i915_gem_object_is_purgeable(obj) && | 1729 | if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && |
1729 | i915_gem_object_put_pages(obj) == 0) { | 1730 | i915_gem_object_put_pages(obj) == 0) { |
1730 | count += obj->base.size >> PAGE_SHIFT; | 1731 | count += obj->base.size >> PAGE_SHIFT; |
1731 | if (count >= target) | 1732 | if (count >= target) |
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target) | |||
1736 | list_for_each_entry_safe(obj, next, | 1737 | list_for_each_entry_safe(obj, next, |
1737 | &dev_priv->mm.inactive_list, | 1738 | &dev_priv->mm.inactive_list, |
1738 | mm_list) { | 1739 | mm_list) { |
1739 | if (i915_gem_object_is_purgeable(obj) && | 1740 | if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && |
1740 | i915_gem_object_unbind(obj) == 0 && | 1741 | i915_gem_object_unbind(obj) == 0 && |
1741 | i915_gem_object_put_pages(obj) == 0) { | 1742 | i915_gem_object_put_pages(obj) == 0) { |
1742 | count += obj->base.size >> PAGE_SHIFT; | 1743 | count += obj->base.size >> PAGE_SHIFT; |
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target) | |||
1748 | return count; | 1749 | return count; |
1749 | } | 1750 | } |
1750 | 1751 | ||
1752 | static long | ||
1753 | i915_gem_purge(struct drm_i915_private *dev_priv, long target) | ||
1754 | { | ||
1755 | return __i915_gem_shrink(dev_priv, target, true); | ||
1756 | } | ||
1757 | |||
1751 | static void | 1758 | static void |
1752 | i915_gem_shrink_all(struct drm_i915_private *dev_priv) | 1759 | i915_gem_shrink_all(struct drm_i915_private *dev_priv) |
1753 | { | 1760 | { |
@@ -3522,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
3522 | goto out; | 3529 | goto out; |
3523 | } | 3530 | } |
3524 | 3531 | ||
3525 | obj->user_pin_count++; | 3532 | if (obj->user_pin_count == 0) { |
3526 | obj->pin_filp = file; | ||
3527 | if (obj->user_pin_count == 1) { | ||
3528 | ret = i915_gem_object_pin(obj, args->alignment, true, false); | 3533 | ret = i915_gem_object_pin(obj, args->alignment, true, false); |
3529 | if (ret) | 3534 | if (ret) |
3530 | goto out; | 3535 | goto out; |
3531 | } | 3536 | } |
3532 | 3537 | ||
3538 | obj->user_pin_count++; | ||
3539 | obj->pin_filp = file; | ||
3540 | |||
3533 | /* XXX - flush the CPU caches for pinned objects | 3541 | /* XXX - flush the CPU caches for pinned objects |
3534 | * as the X server doesn't manage domains yet | 3542 | * as the X server doesn't manage domains yet |
3535 | */ | 3543 | */ |
@@ -4395,6 +4403,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) | |||
4395 | if (nr_to_scan) { | 4403 | if (nr_to_scan) { |
4396 | nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); | 4404 | nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); |
4397 | if (nr_to_scan > 0) | 4405 | if (nr_to_scan > 0) |
4406 | nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan, | ||
4407 | false); | ||
4408 | if (nr_to_scan > 0) | ||
4398 | i915_gem_shrink_all(dev_priv); | 4409 | i915_gem_shrink_all(dev_priv); |
4399 | } | 4410 | } |
4400 | 4411 | ||
@@ -4402,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) | |||
4402 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) | 4413 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) |
4403 | if (obj->pages_pin_count == 0) | 4414 | if (obj->pages_pin_count == 0) |
4404 | cnt += obj->base.size >> PAGE_SHIFT; | 4415 | cnt += obj->base.size >> PAGE_SHIFT; |
4405 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) | 4416 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list) |
4406 | if (obj->pin_count == 0 && obj->pages_pin_count == 0) | 4417 | if (obj->pin_count == 0 && obj->pages_pin_count == 0) |
4407 | cnt += obj->base.size >> PAGE_SHIFT; | 4418 | cnt += obj->base.size >> PAGE_SHIFT; |
4408 | 4419 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a9fb046b94a1..da1ad9c80bb5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -8598,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
8598 | { | 8598 | { |
8599 | int ret; | 8599 | int ret; |
8600 | 8600 | ||
8601 | if (obj->tiling_mode == I915_TILING_Y) | 8601 | if (obj->tiling_mode == I915_TILING_Y) { |
8602 | DRM_DEBUG("hardware does not support tiling Y\n"); | ||
8602 | return -EINVAL; | 8603 | return -EINVAL; |
8604 | } | ||
8603 | 8605 | ||
8604 | if (mode_cmd->pitches[0] & 63) | 8606 | if (mode_cmd->pitches[0] & 63) { |
8607 | DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", | ||
8608 | mode_cmd->pitches[0]); | ||
8605 | return -EINVAL; | 8609 | return -EINVAL; |
8610 | } | ||
8606 | 8611 | ||
8607 | /* FIXME <= Gen4 stride limits are bit unclear */ | 8612 | /* FIXME <= Gen4 stride limits are bit unclear */ |
8608 | if (mode_cmd->pitches[0] > 32768) | 8613 | if (mode_cmd->pitches[0] > 32768) { |
8614 | DRM_DEBUG("pitch (%d) must be at less than 32768\n", | ||
8615 | mode_cmd->pitches[0]); | ||
8609 | return -EINVAL; | 8616 | return -EINVAL; |
8617 | } | ||
8610 | 8618 | ||
8611 | if (obj->tiling_mode != I915_TILING_NONE && | 8619 | if (obj->tiling_mode != I915_TILING_NONE && |
8612 | mode_cmd->pitches[0] != obj->stride) | 8620 | mode_cmd->pitches[0] != obj->stride) { |
8621 | DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", | ||
8622 | mode_cmd->pitches[0], obj->stride); | ||
8613 | return -EINVAL; | 8623 | return -EINVAL; |
8624 | } | ||
8614 | 8625 | ||
8615 | /* Reject formats not supported by any plane early. */ | 8626 | /* Reject formats not supported by any plane early. */ |
8616 | switch (mode_cmd->pixel_format) { | 8627 | switch (mode_cmd->pixel_format) { |
@@ -8621,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
8621 | break; | 8632 | break; |
8622 | case DRM_FORMAT_XRGB1555: | 8633 | case DRM_FORMAT_XRGB1555: |
8623 | case DRM_FORMAT_ARGB1555: | 8634 | case DRM_FORMAT_ARGB1555: |
8624 | if (INTEL_INFO(dev)->gen > 3) | 8635 | if (INTEL_INFO(dev)->gen > 3) { |
8636 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); | ||
8625 | return -EINVAL; | 8637 | return -EINVAL; |
8638 | } | ||
8626 | break; | 8639 | break; |
8627 | case DRM_FORMAT_XBGR8888: | 8640 | case DRM_FORMAT_XBGR8888: |
8628 | case DRM_FORMAT_ABGR8888: | 8641 | case DRM_FORMAT_ABGR8888: |
@@ -8630,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
8630 | case DRM_FORMAT_ARGB2101010: | 8643 | case DRM_FORMAT_ARGB2101010: |
8631 | case DRM_FORMAT_XBGR2101010: | 8644 | case DRM_FORMAT_XBGR2101010: |
8632 | case DRM_FORMAT_ABGR2101010: | 8645 | case DRM_FORMAT_ABGR2101010: |
8633 | if (INTEL_INFO(dev)->gen < 4) | 8646 | if (INTEL_INFO(dev)->gen < 4) { |
8647 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); | ||
8634 | return -EINVAL; | 8648 | return -EINVAL; |
8649 | } | ||
8635 | break; | 8650 | break; |
8636 | case DRM_FORMAT_YUYV: | 8651 | case DRM_FORMAT_YUYV: |
8637 | case DRM_FORMAT_UYVY: | 8652 | case DRM_FORMAT_UYVY: |
8638 | case DRM_FORMAT_YVYU: | 8653 | case DRM_FORMAT_YVYU: |
8639 | case DRM_FORMAT_VYUY: | 8654 | case DRM_FORMAT_VYUY: |
8640 | if (INTEL_INFO(dev)->gen < 6) | 8655 | if (INTEL_INFO(dev)->gen < 5) { |
8656 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); | ||
8641 | return -EINVAL; | 8657 | return -EINVAL; |
8658 | } | ||
8642 | break; | 8659 | break; |
8643 | default: | 8660 | default: |
8644 | DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); | 8661 | DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); |
8645 | return -EINVAL; | 8662 | return -EINVAL; |
8646 | } | 8663 | } |
8647 | 8664 | ||
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b9a660a53677..17aee74258ad 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -776,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
776 | }, | 776 | }, |
777 | { | 777 | { |
778 | .callback = intel_no_lvds_dmi_callback, | 778 | .callback = intel_no_lvds_dmi_callback, |
779 | .ident = "ZOTAC ZBOXSD-ID12/ID13", | ||
780 | .matches = { | ||
781 | DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"), | ||
782 | DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), | ||
783 | }, | ||
784 | }, | ||
785 | { | ||
786 | .callback = intel_no_lvds_dmi_callback, | ||
787 | .ident = "Gigabyte GA-D525TUD", | 779 | .ident = "Gigabyte GA-D525TUD", |
788 | .matches = { | 780 | .matches = { |
789 | DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), | 781 | DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e6f54ffab3ba..e83a11794172 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -44,6 +44,14 @@ | |||
44 | * i915.i915_enable_fbc parameter | 44 | * i915.i915_enable_fbc parameter |
45 | */ | 45 | */ |
46 | 46 | ||
47 | static bool intel_crtc_active(struct drm_crtc *crtc) | ||
48 | { | ||
49 | /* Be paranoid as we can arrive here with only partial | ||
50 | * state retrieved from the hardware during setup. | ||
51 | */ | ||
52 | return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock; | ||
53 | } | ||
54 | |||
47 | static void i8xx_disable_fbc(struct drm_device *dev) | 55 | static void i8xx_disable_fbc(struct drm_device *dev) |
48 | { | 56 | { |
49 | struct drm_i915_private *dev_priv = dev->dev_private; | 57 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev) | |||
405 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 413 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
406 | */ | 414 | */ |
407 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | 415 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
408 | if (to_intel_crtc(tmp_crtc)->active && | 416 | if (intel_crtc_active(tmp_crtc) && |
409 | !to_intel_crtc(tmp_crtc)->primary_disabled && | 417 | !to_intel_crtc(tmp_crtc)->primary_disabled) { |
410 | tmp_crtc->fb) { | ||
411 | if (crtc) { | 418 | if (crtc) { |
412 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | 419 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); |
413 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | 420 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; |
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) | |||
992 | struct drm_crtc *crtc, *enabled = NULL; | 999 | struct drm_crtc *crtc, *enabled = NULL; |
993 | 1000 | ||
994 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 1001 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
995 | if (to_intel_crtc(crtc)->active && crtc->fb) { | 1002 | if (intel_crtc_active(crtc)) { |
996 | if (enabled) | 1003 | if (enabled) |
997 | return NULL; | 1004 | return NULL; |
998 | enabled = crtc; | 1005 | enabled = crtc; |
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, | |||
1086 | int entries, tlb_miss; | 1093 | int entries, tlb_miss; |
1087 | 1094 | ||
1088 | crtc = intel_get_crtc_for_plane(dev, plane); | 1095 | crtc = intel_get_crtc_for_plane(dev, plane); |
1089 | if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) { | 1096 | if (!intel_crtc_active(crtc)) { |
1090 | *cursor_wm = cursor->guard_size; | 1097 | *cursor_wm = cursor->guard_size; |
1091 | *plane_wm = display->guard_size; | 1098 | *plane_wm = display->guard_size; |
1092 | return false; | 1099 | return false; |
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev, | |||
1215 | int entries; | 1222 | int entries; |
1216 | 1223 | ||
1217 | crtc = intel_get_crtc_for_plane(dev, plane); | 1224 | crtc = intel_get_crtc_for_plane(dev, plane); |
1218 | if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) | 1225 | if (!intel_crtc_active(crtc)) |
1219 | return false; | 1226 | return false; |
1220 | 1227 | ||
1221 | clock = crtc->mode.clock; /* VESA DOT Clock */ | 1228 | clock = crtc->mode.clock; /* VESA DOT Clock */ |
@@ -1476,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev) | |||
1476 | 1483 | ||
1477 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 1484 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
1478 | crtc = intel_get_crtc_for_plane(dev, 0); | 1485 | crtc = intel_get_crtc_for_plane(dev, 0); |
1479 | if (to_intel_crtc(crtc)->active && crtc->fb) { | 1486 | if (intel_crtc_active(crtc)) { |
1480 | int cpp = crtc->fb->bits_per_pixel / 8; | 1487 | int cpp = crtc->fb->bits_per_pixel / 8; |
1481 | if (IS_GEN2(dev)) | 1488 | if (IS_GEN2(dev)) |
1482 | cpp = 4; | 1489 | cpp = 4; |
@@ -1490,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev) | |||
1490 | 1497 | ||
1491 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); | 1498 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); |
1492 | crtc = intel_get_crtc_for_plane(dev, 1); | 1499 | crtc = intel_get_crtc_for_plane(dev, 1); |
1493 | if (to_intel_crtc(crtc)->active && crtc->fb) { | 1500 | if (intel_crtc_active(crtc)) { |
1494 | int cpp = crtc->fb->bits_per_pixel / 8; | 1501 | int cpp = crtc->fb->bits_per_pixel / 8; |
1495 | if (IS_GEN2(dev)) | 1502 | if (IS_GEN2(dev)) |
1496 | cpp = 4; | 1503 | cpp = 4; |
@@ -2044,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, | |||
2044 | int entries, tlb_miss; | 2051 | int entries, tlb_miss; |
2045 | 2052 | ||
2046 | crtc = intel_get_crtc_for_plane(dev, plane); | 2053 | crtc = intel_get_crtc_for_plane(dev, plane); |
2047 | if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) { | 2054 | if (!intel_crtc_active(crtc)) { |
2048 | *sprite_wm = display->guard_size; | 2055 | *sprite_wm = display->guard_size; |
2049 | return false; | 2056 | return false; |
2050 | } | 2057 | } |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 827dcd4edf1c..d7b060e0a231 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
120 | I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); | 120 | I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); |
121 | I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); | 121 | I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); |
122 | 122 | ||
123 | linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | 123 | linear_offset = y * fb->pitches[0] + x * pixel_size; |
124 | sprsurf_offset = | 124 | sprsurf_offset = |
125 | intel_gen4_compute_offset_xtiled(&x, &y, | 125 | intel_gen4_compute_offset_xtiled(&x, &y, |
126 | fb->bits_per_pixel / 8, | 126 | pixel_size, fb->pitches[0]); |
127 | fb->pitches[0]); | ||
128 | linear_offset -= sprsurf_offset; | 127 | linear_offset -= sprsurf_offset; |
129 | 128 | ||
130 | /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET | 129 | /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET |
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
286 | I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); | 285 | I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); |
287 | I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); | 286 | I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); |
288 | 287 | ||
289 | linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | 288 | linear_offset = y * fb->pitches[0] + x * pixel_size; |
290 | dvssurf_offset = | 289 | dvssurf_offset = |
291 | intel_gen4_compute_offset_xtiled(&x, &y, | 290 | intel_gen4_compute_offset_xtiled(&x, &y, |
292 | fb->bits_per_pixel / 8, | 291 | pixel_size, fb->pitches[0]); |
293 | fb->pitches[0]); | ||
294 | linear_offset -= dvssurf_offset; | 292 | linear_offset -= dvssurf_offset; |
295 | 293 | ||
296 | if (obj->tiling_mode != I915_TILING_NONE) | 294 | if (obj->tiling_mode != I915_TILING_NONE) |