author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-17 22:48:18 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-17 22:48:18 -0500
commit	ed408f7c0fab7ecc72f94f204f0d2607b2749f69 (patch)
tree	8c85eb47814fff6c9ea0aa6177e47555c0f9c5b1 /drivers/gpu
parent	52f7a82b59ff385da86a3ed17c8d9f6a83531004 (diff)
parent	7d1f9aeff1ee4a20b1aeb377dd0f579fe9647619 (diff)
Merge 3.8-rc4 into driver-core-next
This is to fix up a build problem with a wireless driver due to the dynamic-debug patches in this branch.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/client.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/handle.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/client.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/base.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 8
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 17
27 files changed, 229 insertions(+), 129 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2bf9670ba29b..2aa331499f81 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -221,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
221 221
222 BUG_ON(!hole_node->hole_follows || node->allocated); 222 BUG_ON(!hole_node->hole_follows || node->allocated);
223 223
224 if (mm->color_adjust)
225 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
226
227 if (adj_start < start) 224 if (adj_start < start)
228 adj_start = start; 225 adj_start = start;
226 if (adj_end > end)
227 adj_end = end;
228
229 if (mm->color_adjust)
230 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
229 231
230 if (alignment) { 232 if (alignment) {
231 unsigned tmp = adj_start % alignment; 233 unsigned tmp = adj_start % alignment;
@@ -506,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
506 mm->scan_size = size; 508 mm->scan_size = size;
507 mm->scanned_blocks = 0; 509 mm->scanned_blocks = 0;
508 mm->scan_hit_start = 0; 510 mm->scan_hit_start = 0;
509 mm->scan_hit_size = 0; 511 mm->scan_hit_end = 0;
510 mm->scan_check_range = 0; 512 mm->scan_check_range = 0;
511 mm->prev_scanned_node = NULL; 513 mm->prev_scanned_node = NULL;
512} 514}
@@ -533,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
533 mm->scan_size = size; 535 mm->scan_size = size;
534 mm->scanned_blocks = 0; 536 mm->scanned_blocks = 0;
535 mm->scan_hit_start = 0; 537 mm->scan_hit_start = 0;
536 mm->scan_hit_size = 0; 538 mm->scan_hit_end = 0;
537 mm->scan_start = start; 539 mm->scan_start = start;
538 mm->scan_end = end; 540 mm->scan_end = end;
539 mm->scan_check_range = 1; 541 mm->scan_check_range = 1;
@@ -552,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
552 struct drm_mm *mm = node->mm; 554 struct drm_mm *mm = node->mm;
553 struct drm_mm_node *prev_node; 555 struct drm_mm_node *prev_node;
554 unsigned long hole_start, hole_end; 556 unsigned long hole_start, hole_end;
555 unsigned long adj_start; 557 unsigned long adj_start, adj_end;
556 unsigned long adj_end;
557 558
558 mm->scanned_blocks++; 559 mm->scanned_blocks++;
559 560
@@ -570,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
570 node->node_list.next = &mm->prev_scanned_node->node_list; 571 node->node_list.next = &mm->prev_scanned_node->node_list;
571 mm->prev_scanned_node = node; 572 mm->prev_scanned_node = node;
572 573
573 hole_start = drm_mm_hole_node_start(prev_node); 574 adj_start = hole_start = drm_mm_hole_node_start(prev_node);
574 hole_end = drm_mm_hole_node_end(prev_node); 575 adj_end = hole_end = drm_mm_hole_node_end(prev_node);
575
576 adj_start = hole_start;
577 adj_end = hole_end;
578
579 if (mm->color_adjust)
580 mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
581 576
582 if (mm->scan_check_range) { 577 if (mm->scan_check_range) {
583 if (adj_start < mm->scan_start) 578 if (adj_start < mm->scan_start)
@@ -586,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
586 adj_end = mm->scan_end; 581 adj_end = mm->scan_end;
587 } 582 }
588 583
584 if (mm->color_adjust)
585 mm->color_adjust(prev_node, mm->scan_color,
586 &adj_start, &adj_end);
587
589 if (check_free_hole(adj_start, adj_end, 588 if (check_free_hole(adj_start, adj_end,
590 mm->scan_size, mm->scan_alignment)) { 589 mm->scan_size, mm->scan_alignment)) {
591 mm->scan_hit_start = hole_start; 590 mm->scan_hit_start = hole_start;
592 mm->scan_hit_size = hole_end; 591 mm->scan_hit_end = hole_end;
593
594 return 1; 592 return 1;
595 } 593 }
596 594
@@ -626,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
626 node_list); 624 node_list);
627 625
628 prev_node->hole_follows = node->scanned_preceeds_hole; 626 prev_node->hole_follows = node->scanned_preceeds_hole;
629 INIT_LIST_HEAD(&node->node_list);
630 list_add(&node->node_list, &prev_node->node_list); 627 list_add(&node->node_list, &prev_node->node_list);
631 628
632 /* Only need to check for containement because start&size for the 629 return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
633 * complete resulting free block (not just the desired part) is 630 node->start < mm->scan_hit_end);
634 * stored. */
635 if (node->start >= mm->scan_hit_start &&
636 node->start + node->size
637 <= mm->scan_hit_start + mm->scan_hit_size) {
638 return 1;
639 }
640
641 return 0;
642} 631}
643EXPORT_SYMBOL(drm_mm_scan_remove_block); 632EXPORT_SYMBOL(drm_mm_scan_remove_block);
644 633
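For context, a minimal user-space sketch (not kernel code; all names and values are invented) of the two ideas in the drm_mm.c hunks above: clamp the candidate hole to the requested range before the colour-adjust hook runs, and record a scan hit as an explicit [start, end) interval so the remove-block check becomes a plain interval-overlap test:

/*
 * Illustrative sketch only, assuming a simple "shave one unit off each side"
 * colour hook; everything here is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct hole { unsigned long start, end; };

/* Hypothetical stand-in for mm->color_adjust(). */
static void color_adjust(unsigned long *adj_start, unsigned long *adj_end)
{
	*adj_start += 1;
	*adj_end -= 1;
}

/* Clamp to the allowed range first, then let the colour hook shrink further. */
static void clamp_then_adjust(const struct hole *h,
			      unsigned long range_start, unsigned long range_end,
			      unsigned long *adj_start, unsigned long *adj_end)
{
	*adj_start = h->start < range_start ? range_start : h->start;
	*adj_end   = h->end   > range_end   ? range_end   : h->end;
	color_adjust(adj_start, adj_end);
}

/* The new scan-hit test: does [node_start, node_end) overlap the hit window? */
static bool overlaps_hit(unsigned long node_start, unsigned long node_end,
			 unsigned long hit_start, unsigned long hit_end)
{
	return node_end > hit_start && node_start < hit_end;
}

int main(void)
{
	struct hole h = { 10, 100 };
	unsigned long s, e;

	clamp_then_adjust(&h, 0, 64, &s, &e);
	printf("usable window: [%lu, %lu)\n", s, e);          /* [11, 63) */
	printf("overlap: %d\n", overlaps_hit(20, 40, s, e));   /* 1 */
	return 0;
}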
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index da3c82e301b1..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1717} 1717}
1718 1718
1719static long 1719static long
1720i915_gem_purge(struct drm_i915_private *dev_priv, long target) 1720__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1721 bool purgeable_only)
1721{ 1722{
1722 struct drm_i915_gem_object *obj, *next; 1723 struct drm_i915_gem_object *obj, *next;
1723 long count = 0; 1724 long count = 0;
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1725 list_for_each_entry_safe(obj, next, 1726 list_for_each_entry_safe(obj, next,
1726 &dev_priv->mm.unbound_list, 1727 &dev_priv->mm.unbound_list,
1727 gtt_list) { 1728 gtt_list) {
1728 if (i915_gem_object_is_purgeable(obj) && 1729 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1729 i915_gem_object_put_pages(obj) == 0) { 1730 i915_gem_object_put_pages(obj) == 0) {
1730 count += obj->base.size >> PAGE_SHIFT; 1731 count += obj->base.size >> PAGE_SHIFT;
1731 if (count >= target) 1732 if (count >= target)
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1736 list_for_each_entry_safe(obj, next, 1737 list_for_each_entry_safe(obj, next,
1737 &dev_priv->mm.inactive_list, 1738 &dev_priv->mm.inactive_list,
1738 mm_list) { 1739 mm_list) {
1739 if (i915_gem_object_is_purgeable(obj) && 1740 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1740 i915_gem_object_unbind(obj) == 0 && 1741 i915_gem_object_unbind(obj) == 0 &&
1741 i915_gem_object_put_pages(obj) == 0) { 1742 i915_gem_object_put_pages(obj) == 0) {
1742 count += obj->base.size >> PAGE_SHIFT; 1743 count += obj->base.size >> PAGE_SHIFT;
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1748 return count; 1749 return count;
1749} 1750}
1750 1751
1752static long
1753i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1754{
1755 return __i915_gem_shrink(dev_priv, target, true);
1756}
1757
1751static void 1758static void
1752i915_gem_shrink_all(struct drm_i915_private *dev_priv) 1759i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1753{ 1760{
@@ -3522,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3522 goto out; 3529 goto out;
3523 } 3530 }
3524 3531
3525 obj->user_pin_count++; 3532 if (obj->user_pin_count == 0) {
3526 obj->pin_filp = file;
3527 if (obj->user_pin_count == 1) {
3528 ret = i915_gem_object_pin(obj, args->alignment, true, false); 3533 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3529 if (ret) 3534 if (ret)
3530 goto out; 3535 goto out;
3531 } 3536 }
3532 3537
3538 obj->user_pin_count++;
3539 obj->pin_filp = file;
3540
3533 /* XXX - flush the CPU caches for pinned objects 3541 /* XXX - flush the CPU caches for pinned objects
3534 * as the X server doesn't manage domains yet 3542 * as the X server doesn't manage domains yet
3535 */ 3543 */
@@ -4395,6 +4403,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4395 if (nr_to_scan) { 4403 if (nr_to_scan) {
4396 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); 4404 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4397 if (nr_to_scan > 0) 4405 if (nr_to_scan > 0)
4406 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4407 false);
4408 if (nr_to_scan > 0)
4398 i915_gem_shrink_all(dev_priv); 4409 i915_gem_shrink_all(dev_priv);
4399 } 4410 }
4400 4411
@@ -4402,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4402 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) 4413 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
4403 if (obj->pages_pin_count == 0) 4414 if (obj->pages_pin_count == 0)
4404 cnt += obj->base.size >> PAGE_SHIFT; 4415 cnt += obj->base.size >> PAGE_SHIFT;
4405 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 4416 list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
4406 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4417 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4407 cnt += obj->base.size >> PAGE_SHIFT; 4418 cnt += obj->base.size >> PAGE_SHIFT;
4408 4419
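Two patterns recur in the i915_gem.c hunks above: the purge path becomes a thin wrapper around a parameterised __i915_gem_shrink(), and the pin ioctl only bumps user_pin_count after the pin has actually succeeded. A minimal sketch of both shapes, with invented names and a trivial stand-in for the fallible pin:

/* Illustrative sketch only; not the i915 implementation. */
#include <stdbool.h>
#include <stdio.h>

struct object { int user_pin_count; bool pinned; };

/* Hypothetical fallible operation standing in for i915_gem_object_pin(). */
static int do_pin(struct object *obj)
{
	obj->pinned = true;
	return 0;	/* imagine this could also fail with -ENOMEM etc. */
}

/* Account the pin only once do_pin() can no longer fail. */
static int pin_ioctl(struct object *obj)
{
	int ret;

	if (obj->user_pin_count == 0) {
		ret = do_pin(obj);
		if (ret)
			return ret;	/* count stays consistent on error */
	}
	obj->user_pin_count++;
	return 0;
}

/* One helper with a flag, plus a thin wrapper for the common case. */
static long shrink(long target, bool purgeable_only)
{
	/* ... would walk object lists, honouring purgeable_only ... */
	return purgeable_only ? target / 2 : target;
}

static long purge(long target)
{
	return shrink(target, true);
}

int main(void)
{
	struct object obj = { 0, false };

	printf("pin: %d, count=%d\n", pin_ioctl(&obj), obj.user_pin_count);
	printf("purged: %ld, shrunk: %ld\n", purge(8), shrink(8, false));
	return 0;
}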
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a9fb046b94a1..da1ad9c80bb5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8598,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
8598{ 8598{
8599 int ret; 8599 int ret;
8600 8600
8601 if (obj->tiling_mode == I915_TILING_Y) 8601 if (obj->tiling_mode == I915_TILING_Y) {
8602 DRM_DEBUG("hardware does not support tiling Y\n");
8602 return -EINVAL; 8603 return -EINVAL;
8604 }
8603 8605
8604 if (mode_cmd->pitches[0] & 63) 8606 if (mode_cmd->pitches[0] & 63) {
8607 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
8608 mode_cmd->pitches[0]);
8605 return -EINVAL; 8609 return -EINVAL;
8610 }
8606 8611
8607 /* FIXME <= Gen4 stride limits are bit unclear */ 8612 /* FIXME <= Gen4 stride limits are bit unclear */
8608 if (mode_cmd->pitches[0] > 32768) 8613 if (mode_cmd->pitches[0] > 32768) {
8614 DRM_DEBUG("pitch (%d) must be at less than 32768\n",
8615 mode_cmd->pitches[0]);
8609 return -EINVAL; 8616 return -EINVAL;
8617 }
8610 8618
8611 if (obj->tiling_mode != I915_TILING_NONE && 8619 if (obj->tiling_mode != I915_TILING_NONE &&
8612 mode_cmd->pitches[0] != obj->stride) 8620 mode_cmd->pitches[0] != obj->stride) {
8621 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
8622 mode_cmd->pitches[0], obj->stride);
8613 return -EINVAL; 8623 return -EINVAL;
8624 }
8614 8625
8615 /* Reject formats not supported by any plane early. */ 8626 /* Reject formats not supported by any plane early. */
8616 switch (mode_cmd->pixel_format) { 8627 switch (mode_cmd->pixel_format) {
@@ -8621,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
8621 break; 8632 break;
8622 case DRM_FORMAT_XRGB1555: 8633 case DRM_FORMAT_XRGB1555:
8623 case DRM_FORMAT_ARGB1555: 8634 case DRM_FORMAT_ARGB1555:
8624 if (INTEL_INFO(dev)->gen > 3) 8635 if (INTEL_INFO(dev)->gen > 3) {
8636 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8625 return -EINVAL; 8637 return -EINVAL;
8638 }
8626 break; 8639 break;
8627 case DRM_FORMAT_XBGR8888: 8640 case DRM_FORMAT_XBGR8888:
8628 case DRM_FORMAT_ABGR8888: 8641 case DRM_FORMAT_ABGR8888:
@@ -8630,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
8630 case DRM_FORMAT_ARGB2101010: 8643 case DRM_FORMAT_ARGB2101010:
8631 case DRM_FORMAT_XBGR2101010: 8644 case DRM_FORMAT_XBGR2101010:
8632 case DRM_FORMAT_ABGR2101010: 8645 case DRM_FORMAT_ABGR2101010:
8633 if (INTEL_INFO(dev)->gen < 4) 8646 if (INTEL_INFO(dev)->gen < 4) {
8647 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8634 return -EINVAL; 8648 return -EINVAL;
8649 }
8635 break; 8650 break;
8636 case DRM_FORMAT_YUYV: 8651 case DRM_FORMAT_YUYV:
8637 case DRM_FORMAT_UYVY: 8652 case DRM_FORMAT_UYVY:
8638 case DRM_FORMAT_YVYU: 8653 case DRM_FORMAT_YVYU:
8639 case DRM_FORMAT_VYUY: 8654 case DRM_FORMAT_VYUY:
8640 if (INTEL_INFO(dev)->gen < 6) 8655 if (INTEL_INFO(dev)->gen < 5) {
8656 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8641 return -EINVAL; 8657 return -EINVAL;
8658 }
8642 break; 8659 break;
8643 default: 8660 default:
8644 DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); 8661 DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
8645 return -EINVAL; 8662 return -EINVAL;
8646 } 8663 }
8647 8664
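The intel_display.c hunks above turn silent -EINVAL returns into rejections that also log why the framebuffer was refused. A minimal user-space analogue of that validate-and-log shape, with invented limits and printf standing in for DRM_DEBUG:

/* Illustrative sketch only; checks and messages are simplified. */
#include <errno.h>
#include <stdio.h>

#define pr_debug(...) fprintf(stderr, __VA_ARGS__)

struct fb_req { unsigned pitch; unsigned tiled; unsigned stride; };

static int validate_fb(const struct fb_req *req)
{
	if (req->pitch & 63) {
		pr_debug("pitch (%u) must be at least 64 byte aligned\n", req->pitch);
		return -EINVAL;
	}
	if (req->pitch > 32768) {
		pr_debug("pitch (%u) must be less than 32768\n", req->pitch);
		return -EINVAL;
	}
	if (req->tiled && req->pitch != req->stride) {
		pr_debug("pitch (%u) must match tiling stride (%u)\n",
			 req->pitch, req->stride);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct fb_req bad = { .pitch = 100, .tiled = 0, .stride = 0 };
	struct fb_req ok  = { .pitch = 7680, .tiled = 1, .stride = 7680 };

	printf("bad: %d, ok: %d\n", validate_fb(&bad), validate_fb(&ok));
	return 0;
}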
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b9a660a53677..17aee74258ad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -776,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
776 }, 776 },
777 { 777 {
778 .callback = intel_no_lvds_dmi_callback, 778 .callback = intel_no_lvds_dmi_callback,
779 .ident = "ZOTAC ZBOXSD-ID12/ID13",
780 .matches = {
781 DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
782 DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
783 },
784 },
785 {
786 .callback = intel_no_lvds_dmi_callback,
787 .ident = "Gigabyte GA-D525TUD", 779 .ident = "Gigabyte GA-D525TUD",
788 .matches = { 780 .matches = {
789 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), 781 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e6f54ffab3ba..e83a11794172 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,6 +44,14 @@
44 * i915.i915_enable_fbc parameter 44 * i915.i915_enable_fbc parameter
45 */ 45 */
46 46
47static bool intel_crtc_active(struct drm_crtc *crtc)
48{
49 /* Be paranoid as we can arrive here with only partial
50 * state retrieved from the hardware during setup.
51 */
52 return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
53}
54
47static void i8xx_disable_fbc(struct drm_device *dev) 55static void i8xx_disable_fbc(struct drm_device *dev)
48{ 56{
49 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
405 * - going to an unsupported config (interlace, pixel multiply, etc.) 413 * - going to an unsupported config (interlace, pixel multiply, etc.)
406 */ 414 */
407 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 415 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
408 if (to_intel_crtc(tmp_crtc)->active && 416 if (intel_crtc_active(tmp_crtc) &&
409 !to_intel_crtc(tmp_crtc)->primary_disabled && 417 !to_intel_crtc(tmp_crtc)->primary_disabled) {
410 tmp_crtc->fb) {
411 if (crtc) { 418 if (crtc) {
412 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 419 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
413 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; 420 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
992 struct drm_crtc *crtc, *enabled = NULL; 999 struct drm_crtc *crtc, *enabled = NULL;
993 1000
994 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1001 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
995 if (to_intel_crtc(crtc)->active && crtc->fb) { 1002 if (intel_crtc_active(crtc)) {
996 if (enabled) 1003 if (enabled)
997 return NULL; 1004 return NULL;
998 enabled = crtc; 1005 enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1086 int entries, tlb_miss; 1093 int entries, tlb_miss;
1087 1094
1088 crtc = intel_get_crtc_for_plane(dev, plane); 1095 crtc = intel_get_crtc_for_plane(dev, plane);
1089 if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) { 1096 if (!intel_crtc_active(crtc)) {
1090 *cursor_wm = cursor->guard_size; 1097 *cursor_wm = cursor->guard_size;
1091 *plane_wm = display->guard_size; 1098 *plane_wm = display->guard_size;
1092 return false; 1099 return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1215 int entries; 1222 int entries;
1216 1223
1217 crtc = intel_get_crtc_for_plane(dev, plane); 1224 crtc = intel_get_crtc_for_plane(dev, plane);
1218 if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) 1225 if (!intel_crtc_active(crtc))
1219 return false; 1226 return false;
1220 1227
1221 clock = crtc->mode.clock; /* VESA DOT Clock */ 1228 clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1476,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
1476 1483
1477 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1484 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1478 crtc = intel_get_crtc_for_plane(dev, 0); 1485 crtc = intel_get_crtc_for_plane(dev, 0);
1479 if (to_intel_crtc(crtc)->active && crtc->fb) { 1486 if (intel_crtc_active(crtc)) {
1480 int cpp = crtc->fb->bits_per_pixel / 8; 1487 int cpp = crtc->fb->bits_per_pixel / 8;
1481 if (IS_GEN2(dev)) 1488 if (IS_GEN2(dev))
1482 cpp = 4; 1489 cpp = 4;
@@ -1490,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
1490 1497
1491 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1498 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1492 crtc = intel_get_crtc_for_plane(dev, 1); 1499 crtc = intel_get_crtc_for_plane(dev, 1);
1493 if (to_intel_crtc(crtc)->active && crtc->fb) { 1500 if (intel_crtc_active(crtc)) {
1494 int cpp = crtc->fb->bits_per_pixel / 8; 1501 int cpp = crtc->fb->bits_per_pixel / 8;
1495 if (IS_GEN2(dev)) 1502 if (IS_GEN2(dev))
1496 cpp = 4; 1503 cpp = 4;
@@ -2044,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2044 int entries, tlb_miss; 2051 int entries, tlb_miss;
2045 2052
2046 crtc = intel_get_crtc_for_plane(dev, plane); 2053 crtc = intel_get_crtc_for_plane(dev, plane);
2047 if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) { 2054 if (!intel_crtc_active(crtc)) {
2048 *sprite_wm = display->guard_size; 2055 *sprite_wm = display->guard_size;
2049 return false; 2056 return false;
2050 } 2057 }
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 827dcd4edf1c..d7b060e0a231 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
122 122
123 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 123 linear_offset = y * fb->pitches[0] + x * pixel_size;
124 sprsurf_offset = 124 sprsurf_offset =
125 intel_gen4_compute_offset_xtiled(&x, &y, 125 intel_gen4_compute_offset_xtiled(&x, &y,
126 fb->bits_per_pixel / 8, 126 pixel_size, fb->pitches[0]);
127 fb->pitches[0]);
128 linear_offset -= sprsurf_offset; 127 linear_offset -= sprsurf_offset;
129 128
130 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 129 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
286 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 285 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
287 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 286 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
288 287
289 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 288 linear_offset = y * fb->pitches[0] + x * pixel_size;
290 dvssurf_offset = 289 dvssurf_offset =
291 intel_gen4_compute_offset_xtiled(&x, &y, 290 intel_gen4_compute_offset_xtiled(&x, &y,
292 fb->bits_per_pixel / 8, 291 pixel_size, fb->pitches[0]);
293 fb->pitches[0]);
294 linear_offset -= dvssurf_offset; 292 linear_offset -= dvssurf_offset;
295 293
296 if (obj->tiling_mode != I915_TILING_NONE) 294 if (obj->tiling_mode != I915_TILING_NONE)
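The sprite hunks above compute the byte offset of pixel (x, y) in a linear framebuffer as y * pitch + x * pixel_size, then subtract the hardware surface offset so base plus residual lands on the same pixel. A small sketch of that arithmetic, with invented values and page alignment standing in for the real tiled-surface calculation:

/* Illustrative sketch only; not the intel_sprite.c math. */
#include <stdio.h>

static unsigned long linear_offset(unsigned x, unsigned y,
				   unsigned pitch, unsigned pixel_size)
{
	return (unsigned long)y * pitch + (unsigned long)x * pixel_size;
}

int main(void)
{
	unsigned pitch = 1920 * 4;	/* bytes per scanline, XRGB8888 */
	unsigned pixel_size = 4;	/* bytes per pixel */
	unsigned long off = linear_offset(100, 50, pitch, pixel_size);

	/* Hypothetical 4 KiB-aligned surface base, as a tiled offset might be. */
	unsigned long surf = off & ~0xfffUL;

	printf("linear=%lu surface=%lu residual=%lu\n", off, surf, off - surf);
	return 0;
}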
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index c617f0480071..8bbb58f94a19 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
66 66
67 ret = nouveau_handle_create(nv_object(client), ~0, ~0, 67 ret = nouveau_handle_create(nv_object(client), ~0, ~0,
68 nv_object(client), &client->root); 68 nv_object(client), &client->root);
69 if (ret) { 69 if (ret)
70 nouveau_namedb_destroy(&client->base);
71 return ret; 70 return ret;
72 }
73 71
74 /* prevent init/fini being called, os in in charge of this */ 72 /* prevent init/fini being called, os in in charge of this */
75 atomic_set(&nv_object(client)->usecount, 2); 73 atomic_set(&nv_object(client)->usecount, 2);
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index b8d2cbf8a7a7..264c2b338ac3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS)) 109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
110 namedb = namedb->parent; 110 namedb = namedb->parent;
111 111
112 handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL); 112 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
113 if (!handle) 113 if (!handle)
114 return -ENOMEM; 114 return -ENOMEM;
115 115
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
146 } 146 }
147 147
148 hprintk(handle, TRACE, "created\n"); 148 hprintk(handle, TRACE, "created\n");
149
150 *phandle = handle;
151
149 return 0; 152 return 0;
150} 153}
151 154
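The handle.c hunk above (and the vm/base.c one further down) switch from writing the caller's pointer at allocation time to writing it only once construction has fully succeeded, so an error path never leaves the caller holding a half-initialised, soon-to-be-freed object. A minimal user-space sketch of that pattern, with invented names:

/* Illustrative sketch only. */
#include <stdlib.h>
#include <string.h>

struct thing { char name[16]; };

static int thing_create(const char *name, struct thing **pthing)
{
	struct thing *t = calloc(1, sizeof(*t));

	if (!t)
		return -1;

	if (strlen(name) >= sizeof(t->name)) {	/* construction failed */
		free(t);
		return -1;			/* *pthing left untouched */
	}
	strcpy(t->name, name);

	*pthing = t;				/* publish only on success */
	return 0;
}

int main(void)
{
	struct thing *t = NULL;

	if (thing_create("edid", &t) == 0)
		free(t);
	return 0;
}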
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 0f09af135415..ca1a7d76a95b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -851,20 +851,23 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
851 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 851 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
852 ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); 852 ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
853 853
854 if (nv_device(priv)->chipset < 0x90 || 854 if (!(ctrl & (1 << head))) {
855 nv_device(priv)->chipset == 0x92 || 855 if (nv_device(priv)->chipset < 0x90 ||
856 nv_device(priv)->chipset == 0xa0) { 856 nv_device(priv)->chipset == 0x92 ||
857 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 857 nv_device(priv)->chipset == 0xa0) {
858 ctrl = nv_rd32(priv, 0x610b74 + (i * 8)); 858 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
859 i += 3; 859 ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
860 } else { 860 i += 4;
861 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 861 } else {
862 ctrl = nv_rd32(priv, 0x610798 + (i * 8)); 862 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
863 i += 3; 863 ctrl = nv_rd32(priv, 0x610798 + (i * 8));
864 i += 4;
865 }
864 } 866 }
865 867
866 if (!(ctrl & (1 << head))) 868 if (!(ctrl & (1 << head)))
867 return false; 869 return false;
870 i--;
868 871
869 data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); 872 data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
870 if (data) { 873 if (data) {
@@ -898,20 +901,23 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
898 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 901 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
899 ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); 902 ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
900 903
901 if (nv_device(priv)->chipset < 0x90 || 904 if (!(ctrl & (1 << head))) {
902 nv_device(priv)->chipset == 0x92 || 905 if (nv_device(priv)->chipset < 0x90 ||
903 nv_device(priv)->chipset == 0xa0) { 906 nv_device(priv)->chipset == 0x92 ||
904 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 907 nv_device(priv)->chipset == 0xa0) {
905 ctrl = nv_rd32(priv, 0x610b70 + (i * 8)); 908 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
906 i += 3; 909 ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
907 } else { 910 i += 4;
908 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 911 } else {
909 ctrl = nv_rd32(priv, 0x610794 + (i * 8)); 912 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
910 i += 3; 913 ctrl = nv_rd32(priv, 0x610794 + (i * 8));
914 i += 4;
915 }
911 } 916 }
912 917
913 if (!(ctrl & (1 << head))) 918 if (!(ctrl & (1 << head)))
914 return 0x0000; 919 return 0x0000;
920 i--;
915 921
916 data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1); 922 data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
917 if (!data) 923 if (!data)
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 0193532ceac9..63acc0346ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
36 36
37int nouveau_client_create_(const char *name, u64 device, const char *cfg, 37int nouveau_client_create_(const char *name, u64 device, const char *cfg,
38 const char *dbg, int, void **); 38 const char *dbg, int, void **);
39#define nouveau_client_destroy(p) \
40 nouveau_namedb_destroy(&(p)->base)
41
39int nouveau_client_init(struct nouveau_client *); 42int nouveau_client_init(struct nouveau_client *);
40int nouveau_client_fini(struct nouveau_client *, bool suspend); 43int nouveau_client_fini(struct nouveau_client *, bool suspend);
41 44
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
index c345097592f2..b2f3d4d0aa49 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
38 PLL_UNK42 = 0x42, 38 PLL_UNK42 = 0x42,
39 PLL_VPLL0 = 0x80, 39 PLL_VPLL0 = 0x80,
40 PLL_VPLL1 = 0x81, 40 PLL_VPLL1 = 0x81,
41 PLL_VPLL2 = 0x82,
42 PLL_VPLL3 = 0x83,
41 PLL_MAX = 0xff 43 PLL_MAX = 0xff
42}; 44};
43 45
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2917d552689b..690ed438b2ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -1534,7 +1534,6 @@ init_io(struct nvbios_init *init)
1534 mdelay(10); 1534 mdelay(10);
1535 init_wr32(init, 0x614100, 0x10000018); 1535 init_wr32(init, 0x614100, 0x10000018);
1536 init_wr32(init, 0x614900, 0x10000018); 1536 init_wr32(init, 0x614900, 0x10000018);
1537 return;
1538 } 1537 }
1539 1538
1540 value = init_rdport(init, port) & mask; 1539 value = init_rdport(init, port) & mask;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index f6962c9b6c36..7c9626258a46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
52 switch (info.type) { 52 switch (info.type) {
53 case PLL_VPLL0: 53 case PLL_VPLL0:
54 case PLL_VPLL1: 54 case PLL_VPLL1:
55 case PLL_VPLL2:
56 case PLL_VPLL3:
55 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100); 57 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
56 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M); 58 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
57 nv_wr32(priv, info.reg + 0x10, fN << 16); 59 nv_wr32(priv, info.reg + 0x10, fN << 16);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 306bdf121452..7606ed15b6fa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -145,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
145 mem->memtype = type; 145 mem->memtype = type;
146 mem->size = size; 146 mem->size = size;
147 147
148 mutex_lock(&mm->mutex); 148 mutex_lock(&pfb->base.mutex);
149 do { 149 do {
150 if (back) 150 if (back)
151 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); 151 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
152 else 152 else
153 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r); 153 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
154 if (ret) { 154 if (ret) {
155 mutex_unlock(&mm->mutex); 155 mutex_unlock(&pfb->base.mutex);
156 pfb->ram.put(pfb, &mem); 156 pfb->ram.put(pfb, &mem);
157 return ret; 157 return ret;
158 } 158 }
@@ -160,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
160 list_add_tail(&r->rl_entry, &mem->regions); 160 list_add_tail(&r->rl_entry, &mem->regions);
161 size -= r->length; 161 size -= r->length;
162 } while (size); 162 } while (size);
163 mutex_unlock(&mm->mutex); 163 mutex_unlock(&pfb->base.mutex);
164 164
165 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry); 165 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
166 mem->offset = (u64)r->offset << 12; 166 mem->offset = (u64)r->offset << 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 1188227ca6aa..6565f3dbbe04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
40 if (ret) 40 if (ret)
41 return ret; 41 return ret;
42 42
43 mutex_lock(&imem->base.mutex);
43 list_add(&iobj->head, &imem->list); 44 list_add(&iobj->head, &imem->list);
45 mutex_unlock(&imem->base.mutex);
44 return 0; 46 return 0;
45} 47}
46 48
47void 49void
48nouveau_instobj_destroy(struct nouveau_instobj *iobj) 50nouveau_instobj_destroy(struct nouveau_instobj *iobj)
49{ 51{
50 if (iobj->head.prev) 52 struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
51 list_del(&iobj->head); 53
54 mutex_lock(&subdev->mutex);
55 list_del(&iobj->head);
56 mutex_unlock(&subdev->mutex);
57
52 return nouveau_object_destroy(&iobj->base); 58 return nouveau_object_destroy(&iobj->base);
53} 59}
54 60
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
88 if (ret) 94 if (ret)
89 return ret; 95 return ret;
90 96
97 mutex_lock(&imem->base.mutex);
98
91 list_for_each_entry(iobj, &imem->list, head) { 99 list_for_each_entry(iobj, &imem->list, head) {
92 if (iobj->suspend) { 100 if (iobj->suspend) {
93 for (i = 0; i < iobj->size; i += 4) 101 for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
97 } 105 }
98 } 106 }
99 107
108 mutex_unlock(&imem->base.mutex);
109
100 return 0; 110 return 0;
101} 111}
102 112
@@ -104,17 +114,26 @@ int
104nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend) 114nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
105{ 115{
106 struct nouveau_instobj *iobj; 116 struct nouveau_instobj *iobj;
107 int i; 117 int i, ret = 0;
108 118
109 if (suspend) { 119 if (suspend) {
120 mutex_lock(&imem->base.mutex);
121
110 list_for_each_entry(iobj, &imem->list, head) { 122 list_for_each_entry(iobj, &imem->list, head) {
111 iobj->suspend = vmalloc(iobj->size); 123 iobj->suspend = vmalloc(iobj->size);
112 if (iobj->suspend) { 124 if (!iobj->suspend) {
113 for (i = 0; i < iobj->size; i += 4) 125 ret = -ENOMEM;
114 iobj->suspend[i / 4] = nv_ro32(iobj, i); 126 break;
115 } else 127 }
116 return -ENOMEM; 128
129 for (i = 0; i < iobj->size; i += 4)
130 iobj->suspend[i / 4] = nv_ro32(iobj, i);
117 } 131 }
132
133 mutex_unlock(&imem->base.mutex);
134
135 if (ret)
136 return ret;
118 } 137 }
119 138
120 return nouveau_subdev_fini(&imem->base, suspend); 139 return nouveau_subdev_fini(&imem->base, suspend);
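The instmem hunks above put the subdev mutex around every add, delete, and walk of imem->list. A user-space analogue with pthreads and a singly linked list; the names and lock granularity are invented for the example:

/* Illustrative sketch only; build with -lpthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int value; struct node *next; };

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static int list_add_locked(int value)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->value = value;

	pthread_mutex_lock(&list_lock);
	n->next = head;
	head = n;
	pthread_mutex_unlock(&list_lock);
	return 0;
}

static long list_sum_locked(void)
{
	long sum = 0;
	struct node *n;

	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next)
		sum += n->value;
	pthread_mutex_unlock(&list_lock);
	return sum;
}

int main(void)
{
	list_add_locked(1);
	list_add_locked(2);
	printf("sum=%ld\n", list_sum_locked());
	return 0;
}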
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 082c11b75acb..77c67fc970e6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
352 u64 mm_length = (offset + length) - mm_offset; 352 u64 mm_length = (offset + length) - mm_offset;
353 int ret; 353 int ret;
354 354
355 vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL); 355 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
356 if (!vm) 356 if (!vm)
357 return -ENOMEM; 357 return -ENOMEM;
358 358
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
376 return ret; 376 return ret;
377 } 377 }
378 378
379 *pvm = vm;
380
379 return 0; 381 return 0;
380} 382}
381 383
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ac340ba32017..e620ba8271b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -127,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
127 struct nouveau_encoder **pnv_encoder) 127 struct nouveau_encoder **pnv_encoder)
128{ 128{
129 struct drm_device *dev = connector->dev; 129 struct drm_device *dev = connector->dev;
130 struct nouveau_connector *nv_connector = nouveau_connector(connector);
130 struct nouveau_drm *drm = nouveau_drm(dev); 131 struct nouveau_drm *drm = nouveau_drm(dev);
132 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
131 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 133 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
132 int i; 134 struct nouveau_i2c_port *port = NULL;
135 int i, panel = -ENODEV;
136
137 /* eDP panels need powering on by us (if the VBIOS doesn't default it
138 * to on) before doing any AUX channel transactions. LVDS panel power
139 * is handled by the SOR itself, and not required for LVDS DDC.
140 */
141 if (nv_connector->type == DCB_CONNECTOR_eDP) {
142 panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
143 if (panel == 0) {
144 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
145 msleep(300);
146 }
147 }
133 148
134 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 149 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
135 struct nouveau_i2c_port *port = NULL;
136 struct nouveau_encoder *nv_encoder; 150 struct nouveau_encoder *nv_encoder;
137 struct drm_mode_object *obj; 151 struct drm_mode_object *obj;
138 int id; 152 int id;
@@ -150,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
150 port = i2c->find(i2c, nv_encoder->dcb->i2c_index); 164 port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
151 if (port && nv_probe_i2c(port, 0x50)) { 165 if (port && nv_probe_i2c(port, 0x50)) {
152 *pnv_encoder = nv_encoder; 166 *pnv_encoder = nv_encoder;
153 return port; 167 break;
154 } 168 }
169
170 port = NULL;
155 } 171 }
156 172
157 return NULL; 173 /* eDP panel not detected, restore panel power GPIO to previous
174 * state to avoid confusing the SOR for other output types.
175 */
176 if (!port && panel == 0)
177 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
178
179 return port;
158} 180}
159 181
160static struct nouveau_encoder * 182static struct nouveau_encoder *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e4188f24fc75..508b00a2ce0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
225 if (ret) 225 if (ret)
226 return ret; 226 return ret;
227 227
228 /* power on internal panel if it's not already. the init tables of
229 * some vbios default this to off for some reason, causing the
230 * panel to not work after resume
231 */
232 if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
233 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
234 msleep(300);
235 }
236
237 /* enable polling for external displays */ 228 /* enable polling for external displays */
238 drm_kms_helper_poll_enable(dev); 229 drm_kms_helper_poll_enable(dev);
239 230
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 180a45e3b525..8b090f1eb51d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -84,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
84 struct nouveau_cli *cli; 84 struct nouveau_cli *cli;
85 int ret; 85 int ret;
86 86
87 *pcli = NULL;
87 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config, 88 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
88 nouveau_debug, size, pcli); 89 nouveau_debug, size, pcli);
89 cli = *pcli; 90 cli = *pcli;
90 if (ret) 91 if (ret) {
92 if (cli)
93 nouveau_client_destroy(&cli->base);
94 *pcli = NULL;
91 return ret; 95 return ret;
96 }
92 97
93 mutex_init(&cli->mutex); 98 mutex_init(&cli->mutex);
94 return 0; 99 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index bedafd1c9539..cdb83acdffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
60void nv10_fence_context_del(struct nouveau_channel *); 60void nv10_fence_context_del(struct nouveau_channel *);
61void nv10_fence_destroy(struct nouveau_drm *); 61void nv10_fence_destroy(struct nouveau_drm *);
62int nv10_fence_create(struct nouveau_drm *); 62int nv10_fence_create(struct nouveau_drm *);
63void nv17_fence_resume(struct nouveau_drm *drm);
63 64
64int nv50_fence_create(struct nouveau_drm *); 65int nv50_fence_create(struct nouveau_drm *);
65int nv84_fence_create(struct nouveau_drm *); 66int nv84_fence_create(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 184cdf806761..39ffc07f906b 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
505 505
506static inline bool is_powersaving_dpms(int mode) 506static inline bool is_powersaving_dpms(int mode)
507{ 507{
508 return (mode != DRM_MODE_DPMS_ON); 508 return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
509} 509}
510 510
511static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) 511static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 7ae7f97a6d4d..03017f24d593 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -162,6 +162,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
162 kfree(priv); 162 kfree(priv);
163} 163}
164 164
165void nv17_fence_resume(struct nouveau_drm *drm)
166{
167 struct nv10_fence_priv *priv = drm->fence;
168
169 nouveau_bo_wr32(priv->bo, 0, priv->sequence);
170}
171
165int 172int
166nv10_fence_create(struct nouveau_drm *drm) 173nv10_fence_create(struct nouveau_drm *drm)
167{ 174{
@@ -197,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
197 if (ret == 0) { 204 if (ret == 0) {
198 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 205 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
199 priv->base.sync = nv17_fence_sync; 206 priv->base.sync = nv17_fence_sync;
207 priv->base.resume = nv17_fence_resume;
200 } 208 }
201 } 209 }
202 210
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index c20f2727ea0b..d889f3ac0d41 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -122,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
122 if (ret == 0) { 122 if (ret == 0) {
123 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 123 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
124 priv->base.sync = nv17_fence_sync; 124 priv->base.sync = nv17_fence_sync;
125 priv->base.resume = nv17_fence_resume;
125 } 126 }
126 127
127 if (ret) 128 if (ret)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 03191a56eb44..69ec24ab8d63 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2476,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2476 kfree(parser->relocs); 2476 kfree(parser->relocs);
2477 for (i = 0; i < parser->nchunks; i++) { 2477 for (i = 0; i < parser->nchunks; i++) {
2478 kfree(parser->chunks[i].kdata); 2478 kfree(parser->chunks[i].kdata);
2479 kfree(parser->chunks[i].kpage[0]); 2479 if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
2480 kfree(parser->chunks[i].kpage[1]); 2480 kfree(parser->chunks[i].kpage[0]);
2481 kfree(parser->chunks[i].kpage[1]);
2482 }
2481 } 2483 }
2482 kfree(parser->chunks); 2484 kfree(parser->chunks);
2483 kfree(parser->chunks_array); 2485 kfree(parser->chunks_array);
@@ -2561,16 +2563,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2561 struct radeon_cs_chunk *relocs_chunk; 2563 struct radeon_cs_chunk *relocs_chunk;
2562 unsigned idx; 2564 unsigned idx;
2563 2565
2566 *cs_reloc = NULL;
2564 if (p->chunk_relocs_idx == -1) { 2567 if (p->chunk_relocs_idx == -1) {
2565 DRM_ERROR("No relocation chunk !\n"); 2568 DRM_ERROR("No relocation chunk !\n");
2566 return -EINVAL; 2569 return -EINVAL;
2567 } 2570 }
2568 *cs_reloc = NULL;
2569 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 2571 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2570 idx = p->dma_reloc_idx; 2572 idx = p->dma_reloc_idx;
2571 if (idx >= relocs_chunk->length_dw) { 2573 if (idx >= p->nrelocs) {
2572 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 2574 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2573 idx, relocs_chunk->length_dw); 2575 idx, p->nrelocs);
2574 return -EINVAL; 2576 return -EINVAL;
2575 } 2577 }
2576 *cs_reloc = p->relocs_ptr[idx]; 2578 *cs_reloc = p->relocs_ptr[idx];
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 396baba0141a..469661fd1903 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -279,13 +279,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
279 p->chunks[p->chunk_ib_idx].length_dw); 279 p->chunks[p->chunk_ib_idx].length_dw);
280 return -EINVAL; 280 return -EINVAL;
281 } 281 }
282 if ((p->rdev->flags & RADEON_IS_AGP)) { 282 if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
283 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 283 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
284 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 284 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
285 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || 285 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { 286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
287 kfree(p->chunks[i].kpage[0]); 287 kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
288 kfree(p->chunks[i].kpage[1]); 288 kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
289 return -ENOMEM; 289 return -ENOMEM;
290 } 290 }
291 } 291 }
@@ -583,7 +583,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
583 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 583 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
584 int i; 584 int i;
585 int size = PAGE_SIZE; 585 int size = PAGE_SIZE;
586 bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true; 586 bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
587 false : true;
587 588
588 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { 589 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
589 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)), 590 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index f5ba2241dacc..62cd512f5c8d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
640 enum drm_connector_status found = connector_status_disconnected; 640 enum drm_connector_status found = connector_status_disconnected;
641 bool color = true; 641 bool color = true;
642 642
643 /* just don't bother on RN50 those chip are often connected to remoting
644 * console hw and often we get failure to load detect those. So to make
645 * everyone happy report the encoder as always connected.
646 */
647 if (ASIC_IS_RN50(rdev)) {
648 return connector_status_connected;
649 }
650
643 /* save the regs we need */ 651 /* save the regs we need */
644 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); 652 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
645 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); 653 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 512f44add89f..fe5cdbcf2636 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -22,13 +22,17 @@
22static u8 *udl_get_edid(struct udl_device *udl) 22static u8 *udl_get_edid(struct udl_device *udl)
23{ 23{
24 u8 *block; 24 u8 *block;
25 char rbuf[3]; 25 char *rbuf;
26 int ret, i; 26 int ret, i;
27 27
28 block = kmalloc(EDID_LENGTH, GFP_KERNEL); 28 block = kmalloc(EDID_LENGTH, GFP_KERNEL);
29 if (block == NULL) 29 if (block == NULL)
30 return NULL; 30 return NULL;
31 31
32 rbuf = kmalloc(2, GFP_KERNEL);
33 if (rbuf == NULL)
34 goto error;
35
32 for (i = 0; i < EDID_LENGTH; i++) { 36 for (i = 0; i < EDID_LENGTH; i++) {
33 ret = usb_control_msg(udl->ddev->usbdev, 37 ret = usb_control_msg(udl->ddev->usbdev,
34 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02), 38 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
36 HZ); 40 HZ);
37 if (ret < 1) { 41 if (ret < 1) {
38 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); 42 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
39 i--;
40 goto error; 43 goto error;
41 } 44 }
42 block[i] = rbuf[1]; 45 block[i] = rbuf[1];
43 } 46 }
44 47
48 kfree(rbuf);
45 return block; 49 return block;
46 50
47error: 51error:
48 kfree(block); 52 kfree(block);
53 kfree(rbuf);
49 return NULL; 54 return NULL;
50} 55}
51 56
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
57 62
58 edid = (struct edid *)udl_get_edid(udl); 63 edid = (struct edid *)udl_get_edid(udl);
59 64
65 /*
66 * We only read the main block, but if the monitor reports extension
67 * blocks then the drm edid code expects them to be present, so patch
68 * the extension count to 0.
69 */
70 edid->checksum += edid->extensions;
71 edid->extensions = 0;
72
60 drm_mode_connector_update_edid_property(connector, edid); 73 drm_mode_connector_update_edid_property(connector, edid);
61 ret = drm_add_edid_modes(connector, edid); 74 ret = drm_add_edid_modes(connector, edid);
62 kfree(edid); 75 kfree(edid);
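The udl hunk above zeroes the EDID extension count but bumps the checksum by the old value, because a valid 128-byte EDID block must sum to 0 modulo 256. A small standalone sketch of that invariant on a toy buffer (offsets follow the EDID base-block layout; the buffer contents are invented):

/* Illustrative sketch only. */
#include <stdio.h>
#include <string.h>

#define EDID_LENGTH	128
#define EXT_OFFSET	126	/* extension-count byte */
#define CSUM_OFFSET	127	/* checksum byte */

static unsigned char block_sum(const unsigned char *edid)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH; i++)
		sum += edid[i];
	return (unsigned char)sum;
}

int main(void)
{
	unsigned char edid[EDID_LENGTH];

	memset(edid, 0, sizeof(edid));
	edid[EXT_OFFSET] = 2;				/* claims 2 extensions */
	edid[CSUM_OFFSET] = (unsigned char)(0 - block_sum(edid));

	printf("before: sum=%u\n", block_sum(edid));	/* 0 */

	/* Same fix-up as the patch: drop extensions, keep the block valid. */
	edid[CSUM_OFFSET] += edid[EXT_OFFSET];
	edid[EXT_OFFSET] = 0;

	printf("after:  sum=%u\n", block_sum(edid));	/* still 0 */
	return 0;
}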