author     Linus Torvalds <torvalds@linux-foundation.org>	2013-07-18 17:01:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2013-07-18 17:01:08 -0400
commit     0a693ab6b6b2af0a230e98323023acd3678b0f81 (patch)
tree       9ffd73de7e6d342a106b1414dacef8115a3e209d
parent     7a62711aacda8887d94c40daa199b37abb1d54e1 (diff)
parent     3668f0df6e62cd989909f40669bbe585e8dd51ae (diff)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "You'll be terribly disappointed in this, I'm not trying to sneak any
  features in or anything, its mostly radeon and intel fixes, a couple
  of ARM driver fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (34 commits)
  drm/radeon/dpm: add debugfs support for RS780/RS880 (v3)
  drm/radeon/dpm/atom: fix broken gcc harder
  drm/radeon/dpm/atom: restructure logic to work around a compiler bug
  drm/radeon/dpm: fix atom vram table parsing
  drm/radeon: fix an endian bug in atom table parsing
  drm/radeon: add a module parameter to disable aspm
  drm/rcar-du: Use the GEM PRIME helpers
  drm/shmobile: Use the GEM PRIME helpers
  uvesafb: Really allow mtrr being 0, as documented and warn()ed
  radeon kms: do not flush uninitialized hotplug work
  drm/radeon/dpm/sumo: handle boost states properly when forcing a perf level
  drm/radeon: align VM PTBs (Page Table Blocks) to 32K
  drm/radeon: allow selection of alignment in the sub-allocator
  drm/radeon: never unpin UVD bo v3
  drm/radeon: fix UVD fence emit
  drm/radeon: add fault decode function for CIK
  drm/radeon: add fault decode function for SI (v2)
  drm/radeon: add fault decode function for cayman/TN (v2)
  drm/radeon: use radeon device for request firmware
  drm/radeon: add missing ttm_eu_backoff_reservation to radeon_bo_list_validate
  ...
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 59
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 182
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 102
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 111
-rw-r--r--  drivers/gpu/drm/radeon/rs780_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/rs780d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 298
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 9
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 9
-rw-r--r--  drivers/video/uvesafb.c | 2
39 files changed, 918 insertions, 277 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 062cbda1bf4a..f4af1ca0fb62 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -123,10 +123,10 @@ module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 060
123MODULE_PARM_DESC(preliminary_hw_support, 123MODULE_PARM_DESC(preliminary_hw_support,
124 "Enable preliminary hardware support. (default: false)"); 124 "Enable preliminary hardware support. (default: false)");
125 125
126int i915_disable_power_well __read_mostly = 0; 126int i915_disable_power_well __read_mostly = 1;
127module_param_named(disable_power_well, i915_disable_power_well, int, 0600); 127module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
128MODULE_PARM_DESC(disable_power_well, 128MODULE_PARM_DESC(disable_power_well,
129 "Disable the power well when possible (default: false)"); 129 "Disable the power well when possible (default: true)");
130 130
131int i915_enable_ips __read_mostly = 1; 131int i915_enable_ips __read_mostly = 1;
132module_param_named(enable_ips, i915_enable_ips, int, 0600); 132module_param_named(enable_ips, i915_enable_ips, int, 0600);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4200c32407ec..97afd2639fb6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1880 u32 seqno = intel_ring_get_seqno(ring); 1880 u32 seqno = intel_ring_get_seqno(ring);
1881 1881
1882 BUG_ON(ring == NULL); 1882 BUG_ON(ring == NULL);
1883 if (obj->ring != ring && obj->last_write_seqno) {
1884 /* Keep the seqno relative to the current ring */
1885 obj->last_write_seqno = seqno;
1886 }
1883 obj->ring = ring; 1887 obj->ring = ring;
1884 1888
1885 /* Add a reference if we're newly entering the active list. */ 1889 /* Add a reference if we're newly entering the active list. */
@@ -2653,7 +2657,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2653 drm_i915_private_t *dev_priv = dev->dev_private; 2657 drm_i915_private_t *dev_priv = dev->dev_private;
2654 int fence_reg; 2658 int fence_reg;
2655 int fence_pitch_shift; 2659 int fence_pitch_shift;
2656 uint64_t val;
2657 2660
2658 if (INTEL_INFO(dev)->gen >= 6) { 2661 if (INTEL_INFO(dev)->gen >= 6) {
2659 fence_reg = FENCE_REG_SANDYBRIDGE_0; 2662 fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2663,8 +2666,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2663 fence_pitch_shift = I965_FENCE_PITCH_SHIFT; 2666 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2664 } 2667 }
2665 2668
2669 fence_reg += reg * 8;
2670
2671 /* To w/a incoherency with non-atomic 64-bit register updates,
2672 * we split the 64-bit update into two 32-bit writes. In order
2673 * for a partial fence not to be evaluated between writes, we
2674 * precede the update with write to turn off the fence register,
2675 * and only enable the fence as the last step.
2676 *
2677 * For extra levels of paranoia, we make sure each step lands
2678 * before applying the next step.
2679 */
2680 I915_WRITE(fence_reg, 0);
2681 POSTING_READ(fence_reg);
2682
2666 if (obj) { 2683 if (obj) {
2667 u32 size = obj->gtt_space->size; 2684 u32 size = obj->gtt_space->size;
2685 uint64_t val;
2668 2686
2669 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2687 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2670 0xfffff000) << 32; 2688 0xfffff000) << 32;
@@ -2673,12 +2691,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2673 if (obj->tiling_mode == I915_TILING_Y) 2691 if (obj->tiling_mode == I915_TILING_Y)
2674 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2692 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2675 val |= I965_FENCE_REG_VALID; 2693 val |= I965_FENCE_REG_VALID;
2676 } else
2677 val = 0;
2678 2694
2679 fence_reg += reg * 8; 2695 I915_WRITE(fence_reg + 4, val >> 32);
2680 I915_WRITE64(fence_reg, val); 2696 POSTING_READ(fence_reg + 4);
2681 POSTING_READ(fence_reg); 2697
2698 I915_WRITE(fence_reg + 0, val);
2699 POSTING_READ(fence_reg);
2700 } else {
2701 I915_WRITE(fence_reg + 4, 0);
2702 POSTING_READ(fence_reg + 4);
2703 }
2682} 2704}
2683 2705
2684static void i915_write_fence_reg(struct drm_device *dev, int reg, 2706static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2796,56 +2818,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
2796 return fence - dev_priv->fence_regs; 2818 return fence - dev_priv->fence_regs;
2797} 2819}
2798 2820
2799struct write_fence {
2800 struct drm_device *dev;
2801 struct drm_i915_gem_object *obj;
2802 int fence;
2803};
2804
2805static void i915_gem_write_fence__ipi(void *data)
2806{
2807 struct write_fence *args = data;
2808
2809 /* Required for SNB+ with LLC */
2810 wbinvd();
2811
2812 /* Required for VLV */
2813 i915_gem_write_fence(args->dev, args->fence, args->obj);
2814}
2815
2816static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 2821static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2817 struct drm_i915_fence_reg *fence, 2822 struct drm_i915_fence_reg *fence,
2818 bool enable) 2823 bool enable)
2819{ 2824{
2820 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2825 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2821 struct write_fence args = { 2826 int reg = fence_number(dev_priv, fence);
2822 .dev = obj->base.dev, 2827
2823 .fence = fence_number(dev_priv, fence), 2828 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2824 .obj = enable ? obj : NULL,
2825 };
2826
2827 /* In order to fully serialize access to the fenced region and
2828 * the update to the fence register we need to take extreme
2829 * measures on SNB+. In theory, the write to the fence register
2830 * flushes all memory transactions before, and coupled with the
2831 * mb() placed around the register write we serialise all memory
2832 * operations with respect to the changes in the tiler. Yet, on
2833 * SNB+ we need to take a step further and emit an explicit wbinvd()
2834 * on each processor in order to manually flush all memory
2835 * transactions before updating the fence register.
2836 *
2837 * However, Valleyview complicates matter. There the wbinvd is
2838 * insufficient and unlike SNB/IVB requires the serialising
2839 * register write. (Note that that register write by itself is
2840 * conversely not sufficient for SNB+.) To compromise, we do both.
2841 */
2842 if (INTEL_INFO(args.dev)->gen >= 6)
2843 on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
2844 else
2845 i915_gem_write_fence(args.dev, args.fence, args.obj);
2846 2829
2847 if (enable) { 2830 if (enable) {
2848 obj->fence_reg = args.fence; 2831 obj->fence_reg = reg;
2849 fence->obj = obj; 2832 fence->obj = obj;
2850 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); 2833 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2851 } else { 2834 } else {
@@ -4611,7 +4594,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4611 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) 4594 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4612 if (obj->pages_pin_count == 0) 4595 if (obj->pages_pin_count == 0)
4613 cnt += obj->base.size >> PAGE_SHIFT; 4596 cnt += obj->base.size >> PAGE_SHIFT;
4614 list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list) 4597 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
4615 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4598 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4616 cnt += obj->base.size >> PAGE_SHIFT; 4599 cnt += obj->base.size >> PAGE_SHIFT;
4617 4600
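For reference, a minimal C sketch (not part of the commit) of the 64-bit fence update ordering that the i965_write_fence_reg() hunk above relies on: disable the fence, write the upper dword, then write the lower dword (which carries the valid bit) last, with a posting read after each step. write_reg() and read_reg() are illustrative stand-ins for I915_WRITE() and POSTING_READ(), not the driver's helpers.

typedef unsigned int u32;
typedef unsigned long long u64;

void write_reg(u32 reg, u32 val);  /* stand-in for I915_WRITE() */
u32  read_reg(u32 reg);            /* stand-in for POSTING_READ() */

static void write_fence64(u32 fence_reg, u64 val)
{
        /* Turn the fence off first so a partial value is never sampled. */
        write_reg(fence_reg, 0);
        read_reg(fence_reg);

        /* Upper half while the fence is disabled... */
        write_reg(fence_reg + 4, (u32)(val >> 32));
        read_reg(fence_reg + 4);

        /* ...lower half (with the VALID bit) last, re-enabling the fence. */
        write_reg(fence_reg + 0, (u32)val);
        read_reg(fence_reg);
}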
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index b73971234013..26e162bb3a51 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -75,7 +75,12 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
75 case DP_LINK_BW_1_62: 75 case DP_LINK_BW_1_62:
76 case DP_LINK_BW_2_7: 76 case DP_LINK_BW_2_7:
77 break; 77 break;
78 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
79 max_link_bw = DP_LINK_BW_2_7;
80 break;
78 default: 81 default:
82 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
83 max_link_bw);
79 max_link_bw = DP_LINK_BW_1_62; 84 max_link_bw = DP_LINK_BW_1_62;
80 break; 85 break;
81 } 86 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ccbdd83f5220..d10e6735771f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5500,9 +5500,38 @@ void intel_gt_init(struct drm_device *dev)
5500 if (IS_VALLEYVIEW(dev)) { 5500 if (IS_VALLEYVIEW(dev)) {
5501 dev_priv->gt.force_wake_get = vlv_force_wake_get; 5501 dev_priv->gt.force_wake_get = vlv_force_wake_get;
5502 dev_priv->gt.force_wake_put = vlv_force_wake_put; 5502 dev_priv->gt.force_wake_put = vlv_force_wake_put;
5503 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 5503 } else if (IS_HASWELL(dev)) {
5504 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; 5504 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
5505 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; 5505 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
5506 } else if (IS_IVYBRIDGE(dev)) {
5507 u32 ecobus;
5508
5509 /* IVB configs may use multi-threaded forcewake */
5510
5511 /* A small trick here - if the bios hasn't configured
5512 * MT forcewake, and if the device is in RC6, then
5513 * force_wake_mt_get will not wake the device and the
5514 * ECOBUS read will return zero. Which will be
5515 * (correctly) interpreted by the test below as MT
5516 * forcewake being disabled.
5517 */
5518 mutex_lock(&dev->struct_mutex);
5519 __gen6_gt_force_wake_mt_get(dev_priv);
5520 ecobus = I915_READ_NOTRACE(ECOBUS);
5521 __gen6_gt_force_wake_mt_put(dev_priv);
5522 mutex_unlock(&dev->struct_mutex);
5523
5524 if (ecobus & FORCEWAKE_MT_ENABLE) {
5525 dev_priv->gt.force_wake_get =
5526 __gen6_gt_force_wake_mt_get;
5527 dev_priv->gt.force_wake_put =
5528 __gen6_gt_force_wake_mt_put;
5529 } else {
5530 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
5531 DRM_INFO("when using vblank-synced partial screen updates.\n");
5532 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5533 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5534 }
5506 } else if (IS_GEN6(dev)) { 5535 } else if (IS_GEN6(dev)) {
5507 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; 5536 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5508 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; 5537 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
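As an aside, a small C sketch (not part of the commit) of the probe pattern the new Ivybridge branch above uses: briefly take MT forcewake, sample ECOBUS, release it, and choose the forcewake vfuncs from the result. ecobus_read(), mt_get(), mt_put() and mt_enable_bit are illustrative stand-ins rather than the driver's symbols.

unsigned int ecobus_read(void); /* stand-in for I915_READ_NOTRACE(ECOBUS) */
void mt_get(void);              /* stand-in for __gen6_gt_force_wake_mt_get() */
void mt_put(void);              /* stand-in for __gen6_gt_force_wake_mt_put() */

static int mt_forcewake_available(unsigned int mt_enable_bit)
{
        unsigned int ecobus;

        /* If the BIOS never enabled MT forcewake and the GPU sits in RC6,
         * mt_get() does not wake it and ECOBUS reads back as zero, which
         * the test below correctly treats as "MT forcewake disabled". */
        mt_get();
        ecobus = ecobus_read();
        mt_put();

        return (ecobus & mt_enable_bit) != 0;
}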
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e51ab552046c..664118d8c1d6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
379 return I915_READ(acthd_reg); 379 return I915_READ(acthd_reg);
380} 380}
381 381
382static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
383{
384 struct drm_i915_private *dev_priv = ring->dev->dev_private;
385 u32 addr;
386
387 addr = dev_priv->status_page_dmah->busaddr;
388 if (INTEL_INFO(ring->dev)->gen >= 4)
389 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
390 I915_WRITE(HWS_PGA, addr);
391}
392
382static int init_ring_common(struct intel_ring_buffer *ring) 393static int init_ring_common(struct intel_ring_buffer *ring)
383{ 394{
384 struct drm_device *dev = ring->dev; 395 struct drm_device *dev = ring->dev;
@@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
390 if (HAS_FORCE_WAKE(dev)) 401 if (HAS_FORCE_WAKE(dev))
391 gen6_gt_force_wake_get(dev_priv); 402 gen6_gt_force_wake_get(dev_priv);
392 403
404 if (I915_NEED_GFX_HWS(dev))
405 intel_ring_setup_status_page(ring);
406 else
407 ring_setup_phys_status_page(ring);
408
393 /* Stop the ring if it's running. */ 409 /* Stop the ring if it's running. */
394 I915_WRITE_CTL(ring, 0); 410 I915_WRITE_CTL(ring, 0);
395 I915_WRITE_HEAD(ring, 0); 411 I915_WRITE_HEAD(ring, 0);
@@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
518 struct pipe_control *pc = ring->private; 534 struct pipe_control *pc = ring->private;
519 struct drm_i915_gem_object *obj; 535 struct drm_i915_gem_object *obj;
520 536
521 if (!ring->private)
522 return;
523
524 obj = pc->obj; 537 obj = pc->obj;
525 538
526 kunmap(sg_page(obj->pages->sgl)); 539 kunmap(sg_page(obj->pages->sgl));
@@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
528 drm_gem_object_unreference(&obj->base); 541 drm_gem_object_unreference(&obj->base);
529 542
530 kfree(pc); 543 kfree(pc);
531 ring->private = NULL;
532} 544}
533 545
534static int init_render_ring(struct intel_ring_buffer *ring) 546static int init_render_ring(struct intel_ring_buffer *ring)
@@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
601 if (HAS_BROKEN_CS_TLB(dev)) 613 if (HAS_BROKEN_CS_TLB(dev))
602 drm_gem_object_unreference(to_gem_object(ring->private)); 614 drm_gem_object_unreference(to_gem_object(ring->private));
603 615
604 cleanup_pipe_control(ring); 616 if (INTEL_INFO(dev)->gen >= 5)
617 cleanup_pipe_control(ring);
618
619 ring->private = NULL;
605} 620}
606 621
607static void 622static void
@@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
1223 ring->status_page.obj = obj; 1238 ring->status_page.obj = obj;
1224 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1239 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1225 1240
1226 intel_ring_setup_status_page(ring);
1227 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 1241 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1228 ring->name, ring->status_page.gfx_addr); 1242 ring->name, ring->status_page.gfx_addr);
1229 1243
@@ -1237,10 +1251,9 @@ err:
1237 return ret; 1251 return ret;
1238} 1252}
1239 1253
1240static int init_phys_hws_pga(struct intel_ring_buffer *ring) 1254static int init_phys_status_page(struct intel_ring_buffer *ring)
1241{ 1255{
1242 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1256 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1243 u32 addr;
1244 1257
1245 if (!dev_priv->status_page_dmah) { 1258 if (!dev_priv->status_page_dmah) {
1246 dev_priv->status_page_dmah = 1259 dev_priv->status_page_dmah =
@@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring)
1249 return -ENOMEM; 1262 return -ENOMEM;
1250 } 1263 }
1251 1264
1252 addr = dev_priv->status_page_dmah->busaddr;
1253 if (INTEL_INFO(ring->dev)->gen >= 4)
1254 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
1255 I915_WRITE(HWS_PGA, addr);
1256
1257 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1265 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1258 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1266 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1259 1267
@@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1281 return ret; 1289 return ret;
1282 } else { 1290 } else {
1283 BUG_ON(ring->id != RCS); 1291 BUG_ON(ring->id != RCS);
1284 ret = init_phys_hws_pga(ring); 1292 ret = init_phys_status_page(ring);
1285 if (ret) 1293 if (ret)
1286 return ret; 1294 return ret;
1287 } 1295 }
@@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1893 } 1901 }
1894 1902
1895 if (!I915_NEED_GFX_HWS(dev)) { 1903 if (!I915_NEED_GFX_HWS(dev)) {
1896 ret = init_phys_hws_pga(ring); 1904 ret = init_phys_status_page(ring);
1897 if (ret) 1905 if (ret)
1898 return ret; 1906 return ret;
1899 } 1907 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ed1d91025928..6dacec4e2090 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -22,7 +22,6 @@
22 * Authors: Alex Deucher 22 * Authors: Alex Deucher
23 */ 23 */
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include "drmP.h" 27#include "drmP.h"
@@ -742,7 +741,6 @@ static int ci_mc_load_microcode(struct radeon_device *rdev)
742 */ 741 */
743static int cik_init_microcode(struct radeon_device *rdev) 742static int cik_init_microcode(struct radeon_device *rdev)
744{ 743{
745 struct platform_device *pdev;
746 const char *chip_name; 744 const char *chip_name;
747 size_t pfp_req_size, me_req_size, ce_req_size, 745 size_t pfp_req_size, me_req_size, ce_req_size,
748 mec_req_size, rlc_req_size, mc_req_size, 746 mec_req_size, rlc_req_size, mc_req_size,
@@ -752,13 +750,6 @@ static int cik_init_microcode(struct radeon_device *rdev)
752 750
753 DRM_DEBUG("\n"); 751 DRM_DEBUG("\n");
754 752
755 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
756 err = IS_ERR(pdev);
757 if (err) {
758 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
759 return -EINVAL;
760 }
761
762 switch (rdev->family) { 753 switch (rdev->family) {
763 case CHIP_BONAIRE: 754 case CHIP_BONAIRE:
764 chip_name = "BONAIRE"; 755 chip_name = "BONAIRE";
@@ -794,7 +785,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
794 DRM_INFO("Loading %s Microcode\n", chip_name); 785 DRM_INFO("Loading %s Microcode\n", chip_name);
795 786
796 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 787 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
797 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 788 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
798 if (err) 789 if (err)
799 goto out; 790 goto out;
800 if (rdev->pfp_fw->size != pfp_req_size) { 791 if (rdev->pfp_fw->size != pfp_req_size) {
@@ -806,7 +797,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
806 } 797 }
807 798
808 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 799 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
809 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 800 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
810 if (err) 801 if (err)
811 goto out; 802 goto out;
812 if (rdev->me_fw->size != me_req_size) { 803 if (rdev->me_fw->size != me_req_size) {
@@ -817,7 +808,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
817 } 808 }
818 809
819 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); 810 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
820 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); 811 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
821 if (err) 812 if (err)
822 goto out; 813 goto out;
823 if (rdev->ce_fw->size != ce_req_size) { 814 if (rdev->ce_fw->size != ce_req_size) {
@@ -828,7 +819,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
828 } 819 }
829 820
830 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); 821 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
831 err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev); 822 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
832 if (err) 823 if (err)
833 goto out; 824 goto out;
834 if (rdev->mec_fw->size != mec_req_size) { 825 if (rdev->mec_fw->size != mec_req_size) {
@@ -839,7 +830,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
839 } 830 }
840 831
841 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); 832 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
842 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 833 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
843 if (err) 834 if (err)
844 goto out; 835 goto out;
845 if (rdev->rlc_fw->size != rlc_req_size) { 836 if (rdev->rlc_fw->size != rlc_req_size) {
@@ -850,7 +841,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
850 } 841 }
851 842
852 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); 843 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
853 err = request_firmware(&rdev->sdma_fw, fw_name, &pdev->dev); 844 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
854 if (err) 845 if (err)
855 goto out; 846 goto out;
856 if (rdev->sdma_fw->size != sdma_req_size) { 847 if (rdev->sdma_fw->size != sdma_req_size) {
@@ -863,7 +854,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
863 /* No MC ucode on APUs */ 854 /* No MC ucode on APUs */
864 if (!(rdev->flags & RADEON_IS_IGP)) { 855 if (!(rdev->flags & RADEON_IS_IGP)) {
865 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 856 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
866 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); 857 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
867 if (err) 858 if (err)
868 goto out; 859 goto out;
869 if (rdev->mc_fw->size != mc_req_size) { 860 if (rdev->mc_fw->size != mc_req_size) {
@@ -875,8 +866,6 @@ static int cik_init_microcode(struct radeon_device *rdev)
875 } 866 }
876 867
877out: 868out:
878 platform_device_unregister(pdev);
879
880 if (err) { 869 if (err) {
881 if (err != -EINVAL) 870 if (err != -EINVAL)
882 printk(KERN_ERR 871 printk(KERN_ERR
@@ -4453,6 +4442,29 @@ void cik_vm_fini(struct radeon_device *rdev)
4453} 4442}
4454 4443
4455/** 4444/**
4445 * cik_vm_decode_fault - print human readable fault info
4446 *
4447 * @rdev: radeon_device pointer
4448 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4449 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4450 *
4451 * Print human readable fault information (CIK).
4452 */
4453static void cik_vm_decode_fault(struct radeon_device *rdev,
4454 u32 status, u32 addr, u32 mc_client)
4455{
4456 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4457 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4458 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4459 char *block = (char *)&mc_client;
4460
4461 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
4462 protections, vmid, addr,
4463 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
4464 block, mc_id);
4465}
4466
4467/**
4456 * cik_vm_flush - cik vm flush using the CP 4468 * cik_vm_flush - cik vm flush using the CP
4457 * 4469 *
4458 * @rdev: radeon_device pointer 4470 * @rdev: radeon_device pointer
@@ -5507,6 +5519,7 @@ int cik_irq_process(struct radeon_device *rdev)
5507 u32 ring_index; 5519 u32 ring_index;
5508 bool queue_hotplug = false; 5520 bool queue_hotplug = false;
5509 bool queue_reset = false; 5521 bool queue_reset = false;
5522 u32 addr, status, mc_client;
5510 5523
5511 if (!rdev->ih.enabled || rdev->shutdown) 5524 if (!rdev->ih.enabled || rdev->shutdown)
5512 return IRQ_NONE; 5525 return IRQ_NONE;
@@ -5742,11 +5755,15 @@ restart_ih:
5742 break; 5755 break;
5743 case 146: 5756 case 146:
5744 case 147: 5757 case 147:
5758 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
5759 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
5760 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
5745 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 5761 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
5746 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 5762 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
5747 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); 5763 addr);
5748 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 5764 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
5749 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 5765 status);
5766 cik_vm_decode_fault(rdev, status, addr, mc_client);
5750 /* reset addr and status */ 5767 /* reset addr and status */
5751 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); 5768 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
5752 break; 5769 break;
@@ -6961,7 +6978,7 @@ int cik_uvd_resume(struct radeon_device *rdev)
6961 6978
6962 /* programm the VCPU memory controller bits 0-27 */ 6979 /* programm the VCPU memory controller bits 0-27 */
6963 addr = rdev->uvd.gpu_addr >> 3; 6980 addr = rdev->uvd.gpu_addr >> 3;
6964 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; 6981 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
6965 WREG32(UVD_VCPU_CACHE_OFFSET0, addr); 6982 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
6966 WREG32(UVD_VCPU_CACHE_SIZE0, size); 6983 WREG32(UVD_VCPU_CACHE_SIZE0, size);
6967 6984
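A short C sketch (not part of the commit) of the firmware-loading pattern the cik.c changes above switch to: request each blob against the GPU's own struct device (rdev->dev) instead of a throwaway "radeon_cp" platform device, then validate its size. load_ucode() is a made-up helper for illustration.

#include <linux/firmware.h>
#include <linux/device.h>

static int load_ucode(struct device *dev, const char *name,
                      const struct firmware **fw, size_t expected_size)
{
        int err;

        /* e.g. dev == rdev->dev, name == "radeon/BONAIRE_pfp.bin" */
        err = request_firmware(fw, name, dev);
        if (err)
                return err;

        if ((*fw)->size != expected_size) {
                release_firmware(*fw);
                *fw = NULL;
                return -EINVAL;
        }
        return 0;
}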
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 63514b95889a..7e9275eaef80 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -136,6 +136,22 @@
136#define VM_INVALIDATE_RESPONSE 0x147c 136#define VM_INVALIDATE_RESPONSE 0x147c
137 137
138#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC 138#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
139#define PROTECTIONS_MASK (0xf << 0)
140#define PROTECTIONS_SHIFT 0
141 /* bit 0: range
142 * bit 1: pde0
143 * bit 2: valid
144 * bit 3: read
145 * bit 4: write
146 */
147#define MEMORY_CLIENT_ID_MASK (0xff << 12)
148#define MEMORY_CLIENT_ID_SHIFT 12
149#define MEMORY_CLIENT_RW_MASK (1 << 24)
150#define MEMORY_CLIENT_RW_SHIFT 24
151#define FAULT_VMID_MASK (0xf << 25)
152#define FAULT_VMID_SHIFT 25
153
154#define VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT 0x14E4
139 155
140#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC 156#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
141 157
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e49059dc9b8f..038dcac7670c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -139,6 +139,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
139void evergreen_program_aspm(struct radeon_device *rdev); 139void evergreen_program_aspm(struct radeon_device *rdev);
140extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, 140extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
141 int ring, u32 cp_int_cntl); 141 int ring, u32 cp_int_cntl);
142extern void cayman_vm_decode_fault(struct radeon_device *rdev,
143 u32 status, u32 addr);
142 144
143static const u32 evergreen_golden_registers[] = 145static const u32 evergreen_golden_registers[] =
144{ 146{
@@ -4586,6 +4588,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
4586 bool queue_hotplug = false; 4588 bool queue_hotplug = false;
4587 bool queue_hdmi = false; 4589 bool queue_hdmi = false;
4588 bool queue_thermal = false; 4590 bool queue_thermal = false;
4591 u32 status, addr;
4589 4592
4590 if (!rdev->ih.enabled || rdev->shutdown) 4593 if (!rdev->ih.enabled || rdev->shutdown)
4591 return IRQ_NONE; 4594 return IRQ_NONE;
@@ -4872,11 +4875,14 @@ restart_ih:
4872 break; 4875 break;
4873 case 146: 4876 case 146:
4874 case 147: 4877 case 147:
4878 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
4879 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
4875 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 4880 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
4876 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 4881 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
4877 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); 4882 addr);
4878 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 4883 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
4879 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 4884 status);
4885 cayman_vm_decode_fault(rdev, status, addr);
4880 /* reset addr and status */ 4886 /* reset addr and status */
4881 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); 4887 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
4882 break; 4888 break;
@@ -5509,6 +5515,9 @@ void evergreen_program_aspm(struct radeon_device *rdev)
5509 */ 5515 */
5510 bool fusion_platform = false; 5516 bool fusion_platform = false;
5511 5517
5518 if (radeon_aspm == 0)
5519 return;
5520
5512 if (!(rdev->flags & RADEON_IS_PCIE)) 5521 if (!(rdev->flags & RADEON_IS_PCIE))
5513 return; 5522 return;
5514 5523
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b9c6f7675e59..b0d3fb341417 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -177,6 +177,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
177 uint32_t offset; 177 uint32_t offset;
178 ssize_t err; 178 ssize_t err;
179 179
180 if (!dig || !dig->afmt)
181 return;
182
180 /* Silent, r600_hdmi_enable will raise WARN for us */ 183 /* Silent, r600_hdmi_enable will raise WARN for us */
181 if (!dig->afmt->enabled) 184 if (!dig->afmt->enabled)
182 return; 185 return;
@@ -280,6 +283,9 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
280 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 283 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
281 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 284 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
282 285
286 if (!dig || !dig->afmt)
287 return;
288
283 /* Silent, r600_hdmi_enable will raise WARN for us */ 289 /* Silent, r600_hdmi_enable will raise WARN for us */
284 if (enable && dig->afmt->enabled) 290 if (enable && dig->afmt->enabled)
285 return; 291 return;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index f30127cb30ef..56bd4f3be4fe 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -22,7 +22,6 @@
22 * Authors: Alex Deucher 22 * Authors: Alex Deucher
23 */ 23 */
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <drm/drmP.h> 27#include <drm/drmP.h>
@@ -684,7 +683,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
684 683
685int ni_init_microcode(struct radeon_device *rdev) 684int ni_init_microcode(struct radeon_device *rdev)
686{ 685{
687 struct platform_device *pdev;
688 const char *chip_name; 686 const char *chip_name;
689 const char *rlc_chip_name; 687 const char *rlc_chip_name;
690 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; 688 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
@@ -694,13 +692,6 @@ int ni_init_microcode(struct radeon_device *rdev)
694 692
695 DRM_DEBUG("\n"); 693 DRM_DEBUG("\n");
696 694
697 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
698 err = IS_ERR(pdev);
699 if (err) {
700 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
701 return -EINVAL;
702 }
703
704 switch (rdev->family) { 695 switch (rdev->family) {
705 case CHIP_BARTS: 696 case CHIP_BARTS:
706 chip_name = "BARTS"; 697 chip_name = "BARTS";
@@ -753,7 +744,7 @@ int ni_init_microcode(struct radeon_device *rdev)
753 DRM_INFO("Loading %s Microcode\n", chip_name); 744 DRM_INFO("Loading %s Microcode\n", chip_name);
754 745
755 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 746 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
756 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 747 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
757 if (err) 748 if (err)
758 goto out; 749 goto out;
759 if (rdev->pfp_fw->size != pfp_req_size) { 750 if (rdev->pfp_fw->size != pfp_req_size) {
@@ -765,7 +756,7 @@ int ni_init_microcode(struct radeon_device *rdev)
765 } 756 }
766 757
767 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 758 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
768 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 759 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
769 if (err) 760 if (err)
770 goto out; 761 goto out;
771 if (rdev->me_fw->size != me_req_size) { 762 if (rdev->me_fw->size != me_req_size) {
@@ -776,7 +767,7 @@ int ni_init_microcode(struct radeon_device *rdev)
776 } 767 }
777 768
778 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); 769 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
779 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 770 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
780 if (err) 771 if (err)
781 goto out; 772 goto out;
782 if (rdev->rlc_fw->size != rlc_req_size) { 773 if (rdev->rlc_fw->size != rlc_req_size) {
@@ -789,7 +780,7 @@ int ni_init_microcode(struct radeon_device *rdev)
789 /* no MC ucode on TN */ 780 /* no MC ucode on TN */
790 if (!(rdev->flags & RADEON_IS_IGP)) { 781 if (!(rdev->flags & RADEON_IS_IGP)) {
791 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 782 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
792 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); 783 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
793 if (err) 784 if (err)
794 goto out; 785 goto out;
795 if (rdev->mc_fw->size != mc_req_size) { 786 if (rdev->mc_fw->size != mc_req_size) {
@@ -802,7 +793,7 @@ int ni_init_microcode(struct radeon_device *rdev)
802 793
803 if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { 794 if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
804 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 795 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
805 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); 796 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
806 if (err) 797 if (err)
807 goto out; 798 goto out;
808 if (rdev->smc_fw->size != smc_req_size) { 799 if (rdev->smc_fw->size != smc_req_size) {
@@ -814,8 +805,6 @@ int ni_init_microcode(struct radeon_device *rdev)
814 } 805 }
815 806
816out: 807out:
817 platform_device_unregister(pdev);
818
819 if (err) { 808 if (err) {
820 if (err != -EINVAL) 809 if (err != -EINVAL)
821 printk(KERN_ERR 810 printk(KERN_ERR
@@ -2461,6 +2450,167 @@ void cayman_vm_fini(struct radeon_device *rdev)
2461{ 2450{
2462} 2451}
2463 2452
2453/**
2454 * cayman_vm_decode_fault - print human readable fault info
2455 *
2456 * @rdev: radeon_device pointer
2457 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
2458 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
2459 *
2460 * Print human readable fault information (cayman/TN).
2461 */
2462void cayman_vm_decode_fault(struct radeon_device *rdev,
2463 u32 status, u32 addr)
2464{
2465 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
2466 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
2467 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
2468 char *block;
2469
2470 switch (mc_id) {
2471 case 32:
2472 case 16:
2473 case 96:
2474 case 80:
2475 case 160:
2476 case 144:
2477 case 224:
2478 case 208:
2479 block = "CB";
2480 break;
2481 case 33:
2482 case 17:
2483 case 97:
2484 case 81:
2485 case 161:
2486 case 145:
2487 case 225:
2488 case 209:
2489 block = "CB_FMASK";
2490 break;
2491 case 34:
2492 case 18:
2493 case 98:
2494 case 82:
2495 case 162:
2496 case 146:
2497 case 226:
2498 case 210:
2499 block = "CB_CMASK";
2500 break;
2501 case 35:
2502 case 19:
2503 case 99:
2504 case 83:
2505 case 163:
2506 case 147:
2507 case 227:
2508 case 211:
2509 block = "CB_IMMED";
2510 break;
2511 case 36:
2512 case 20:
2513 case 100:
2514 case 84:
2515 case 164:
2516 case 148:
2517 case 228:
2518 case 212:
2519 block = "DB";
2520 break;
2521 case 37:
2522 case 21:
2523 case 101:
2524 case 85:
2525 case 165:
2526 case 149:
2527 case 229:
2528 case 213:
2529 block = "DB_HTILE";
2530 break;
2531 case 38:
2532 case 22:
2533 case 102:
2534 case 86:
2535 case 166:
2536 case 150:
2537 case 230:
2538 case 214:
2539 block = "SX";
2540 break;
2541 case 39:
2542 case 23:
2543 case 103:
2544 case 87:
2545 case 167:
2546 case 151:
2547 case 231:
2548 case 215:
2549 block = "DB_STEN";
2550 break;
2551 case 40:
2552 case 24:
2553 case 104:
2554 case 88:
2555 case 232:
2556 case 216:
2557 case 168:
2558 case 152:
2559 block = "TC_TFETCH";
2560 break;
2561 case 41:
2562 case 25:
2563 case 105:
2564 case 89:
2565 case 233:
2566 case 217:
2567 case 169:
2568 case 153:
2569 block = "TC_VFETCH";
2570 break;
2571 case 42:
2572 case 26:
2573 case 106:
2574 case 90:
2575 case 234:
2576 case 218:
2577 case 170:
2578 case 154:
2579 block = "VC";
2580 break;
2581 case 112:
2582 block = "CP";
2583 break;
2584 case 113:
2585 case 114:
2586 block = "SH";
2587 break;
2588 case 115:
2589 block = "VGT";
2590 break;
2591 case 178:
2592 block = "IH";
2593 break;
2594 case 51:
2595 block = "RLC";
2596 break;
2597 case 55:
2598 block = "DMA";
2599 break;
2600 case 56:
2601 block = "HDP";
2602 break;
2603 default:
2604 block = "unknown";
2605 break;
2606 }
2607
2608 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
2609 protections, vmid, addr,
2610 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
2611 block, mc_id);
2612}
2613
2464#define R600_ENTRY_VALID (1 << 0) 2614#define R600_ENTRY_VALID (1 << 0)
2465#define R600_PTE_SYSTEM (1 << 1) 2615#define R600_PTE_SYSTEM (1 << 1)
2466#define R600_PTE_SNOOPED (1 << 2) 2616#define R600_PTE_SNOOPED (1 << 2)
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index fe24a93542ec..22421bc80c0d 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -133,6 +133,22 @@
133#define VM_CONTEXT1_CNTL2 0x1434 133#define VM_CONTEXT1_CNTL2 0x1434
134#define VM_INVALIDATE_REQUEST 0x1478 134#define VM_INVALIDATE_REQUEST 0x1478
135#define VM_INVALIDATE_RESPONSE 0x147c 135#define VM_INVALIDATE_RESPONSE 0x147c
136#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
137#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
138#define PROTECTIONS_MASK (0xf << 0)
139#define PROTECTIONS_SHIFT 0
140 /* bit 0: range
141 * bit 2: pde0
142 * bit 3: valid
143 * bit 4: read
144 * bit 5: write
145 */
146#define MEMORY_CLIENT_ID_MASK (0xff << 12)
147#define MEMORY_CLIENT_ID_SHIFT 12
148#define MEMORY_CLIENT_RW_MASK (1 << 24)
149#define MEMORY_CLIENT_RW_SHIFT 24
150#define FAULT_VMID_MASK (0x7 << 25)
151#define FAULT_VMID_SHIFT 25
136#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 152#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
137#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c 153#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
138#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C 154#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9affefd79f6..75349cdaa84b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -39,7 +39,6 @@
39#include "atom.h" 39#include "atom.h"
40 40
41#include <linux/firmware.h> 41#include <linux/firmware.h>
42#include <linux/platform_device.h>
43#include <linux/module.h> 42#include <linux/module.h>
44 43
45#include "r100_reg_safe.h" 44#include "r100_reg_safe.h"
@@ -989,18 +988,11 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
989/* Load the microcode for the CP */ 988/* Load the microcode for the CP */
990static int r100_cp_init_microcode(struct radeon_device *rdev) 989static int r100_cp_init_microcode(struct radeon_device *rdev)
991{ 990{
992 struct platform_device *pdev;
993 const char *fw_name = NULL; 991 const char *fw_name = NULL;
994 int err; 992 int err;
995 993
996 DRM_DEBUG_KMS("\n"); 994 DRM_DEBUG_KMS("\n");
997 995
998 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
999 err = IS_ERR(pdev);
1000 if (err) {
1001 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1002 return -EINVAL;
1003 }
1004 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || 996 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
1005 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || 997 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
1006 (rdev->family == CHIP_RS200)) { 998 (rdev->family == CHIP_RS200)) {
@@ -1042,8 +1034,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
1042 fw_name = FIRMWARE_R520; 1034 fw_name = FIRMWARE_R520;
1043 } 1035 }
1044 1036
1045 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 1037 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1046 platform_device_unregister(pdev);
1047 if (err) { 1038 if (err) {
1048 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", 1039 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
1049 fw_name); 1040 fw_name);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2d3655f7f41e..393880a09412 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -28,7 +28,6 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/firmware.h> 30#include <linux/firmware.h>
31#include <linux/platform_device.h>
32#include <linux/module.h> 31#include <linux/module.h>
33#include <drm/drmP.h> 32#include <drm/drmP.h>
34#include <drm/radeon_drm.h> 33#include <drm/radeon_drm.h>
@@ -2144,7 +2143,6 @@ void r600_cp_stop(struct radeon_device *rdev)
2144 2143
2145int r600_init_microcode(struct radeon_device *rdev) 2144int r600_init_microcode(struct radeon_device *rdev)
2146{ 2145{
2147 struct platform_device *pdev;
2148 const char *chip_name; 2146 const char *chip_name;
2149 const char *rlc_chip_name; 2147 const char *rlc_chip_name;
2150 const char *smc_chip_name = "RV770"; 2148 const char *smc_chip_name = "RV770";
@@ -2154,13 +2152,6 @@ int r600_init_microcode(struct radeon_device *rdev)
2154 2152
2155 DRM_DEBUG("\n"); 2153 DRM_DEBUG("\n");
2156 2154
2157 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
2158 err = IS_ERR(pdev);
2159 if (err) {
2160 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
2161 return -EINVAL;
2162 }
2163
2164 switch (rdev->family) { 2155 switch (rdev->family) {
2165 case CHIP_R600: 2156 case CHIP_R600:
2166 chip_name = "R600"; 2157 chip_name = "R600";
@@ -2272,7 +2263,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2272 DRM_INFO("Loading %s Microcode\n", chip_name); 2263 DRM_INFO("Loading %s Microcode\n", chip_name);
2273 2264
2274 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 2265 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2275 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 2266 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2276 if (err) 2267 if (err)
2277 goto out; 2268 goto out;
2278 if (rdev->pfp_fw->size != pfp_req_size) { 2269 if (rdev->pfp_fw->size != pfp_req_size) {
@@ -2284,7 +2275,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2284 } 2275 }
2285 2276
2286 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 2277 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2287 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 2278 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2288 if (err) 2279 if (err)
2289 goto out; 2280 goto out;
2290 if (rdev->me_fw->size != me_req_size) { 2281 if (rdev->me_fw->size != me_req_size) {
@@ -2295,7 +2286,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2295 } 2286 }
2296 2287
2297 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); 2288 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2298 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 2289 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2299 if (err) 2290 if (err)
2300 goto out; 2291 goto out;
2301 if (rdev->rlc_fw->size != rlc_req_size) { 2292 if (rdev->rlc_fw->size != rlc_req_size) {
@@ -2307,7 +2298,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2307 2298
2308 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { 2299 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2309 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); 2300 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2310 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); 2301 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2311 if (err) 2302 if (err)
2312 goto out; 2303 goto out;
2313 if (rdev->smc_fw->size != smc_req_size) { 2304 if (rdev->smc_fw->size != smc_req_size) {
@@ -2319,8 +2310,6 @@ int r600_init_microcode(struct radeon_device *rdev)
2319 } 2310 }
2320 2311
2321out: 2312out:
2322 platform_device_unregister(pdev);
2323
2324 if (err) { 2313 if (err) {
2325 if (err != -EINVAL) 2314 if (err != -EINVAL)
2326 printk(KERN_ERR 2315 printk(KERN_ERR
@@ -3019,7 +3008,7 @@ void r600_uvd_fence_emit(struct radeon_device *rdev,
3019 struct radeon_fence *fence) 3008 struct radeon_fence *fence)
3020{ 3009{
3021 struct radeon_ring *ring = &rdev->ring[fence->ring]; 3010 struct radeon_ring *ring = &rdev->ring[fence->ring];
3022 uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr; 3011 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
3023 3012
3024 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); 3013 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
3025 radeon_ring_write(ring, fence->seq); 3014 radeon_ring_write(ring, fence->seq);
@@ -3145,6 +3134,87 @@ int r600_copy_blit(struct radeon_device *rdev,
3145} 3134}
3146 3135
3147/** 3136/**
3137 * r600_copy_cpdma - copy pages using the CP DMA engine
3138 *
3139 * @rdev: radeon_device pointer
3140 * @src_offset: src GPU address
3141 * @dst_offset: dst GPU address
3142 * @num_gpu_pages: number of GPU pages to xfer
3143 * @fence: radeon fence object
3144 *
3145 * Copy GPU paging using the CP DMA engine (r6xx+).
3146 * Used by the radeon ttm implementation to move pages if
3147 * registered as the asic copy callback.
3148 */
3149int r600_copy_cpdma(struct radeon_device *rdev,
3150 uint64_t src_offset, uint64_t dst_offset,
3151 unsigned num_gpu_pages,
3152 struct radeon_fence **fence)
3153{
3154 struct radeon_semaphore *sem = NULL;
3155 int ring_index = rdev->asic->copy.blit_ring_index;
3156 struct radeon_ring *ring = &rdev->ring[ring_index];
3157 u32 size_in_bytes, cur_size_in_bytes, tmp;
3158 int i, num_loops;
3159 int r = 0;
3160
3161 r = radeon_semaphore_create(rdev, &sem);
3162 if (r) {
3163 DRM_ERROR("radeon: moving bo (%d).\n", r);
3164 return r;
3165 }
3166
3167 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3168 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3169 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21);
3170 if (r) {
3171 DRM_ERROR("radeon: moving bo (%d).\n", r);
3172 radeon_semaphore_free(rdev, &sem, NULL);
3173 return r;
3174 }
3175
3176 if (radeon_fence_need_sync(*fence, ring->idx)) {
3177 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3178 ring->idx);
3179 radeon_fence_note_sync(*fence, ring->idx);
3180 } else {
3181 radeon_semaphore_free(rdev, &sem, NULL);
3182 }
3183
3184 for (i = 0; i < num_loops; i++) {
3185 cur_size_in_bytes = size_in_bytes;
3186 if (cur_size_in_bytes > 0x1fffff)
3187 cur_size_in_bytes = 0x1fffff;
3188 size_in_bytes -= cur_size_in_bytes;
3189 tmp = upper_32_bits(src_offset) & 0xff;
3190 if (size_in_bytes == 0)
3191 tmp |= PACKET3_CP_DMA_CP_SYNC;
3192 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
3193 radeon_ring_write(ring, src_offset & 0xffffffff);
3194 radeon_ring_write(ring, tmp);
3195 radeon_ring_write(ring, dst_offset & 0xffffffff);
3196 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3197 radeon_ring_write(ring, cur_size_in_bytes);
3198 src_offset += cur_size_in_bytes;
3199 dst_offset += cur_size_in_bytes;
3200 }
3201 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3202 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3203 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
3204
3205 r = radeon_fence_emit(rdev, fence, ring->idx);
3206 if (r) {
3207 radeon_ring_unlock_undo(rdev, ring);
3208 return r;
3209 }
3210
3211 radeon_ring_unlock_commit(rdev, ring);
3212 radeon_semaphore_free(rdev, &sem, *fence);
3213
3214 return r;
3215}
3216
3217/**
3148 * r600_copy_dma - copy pages using the DMA engine 3218 * r600_copy_dma - copy pages using the DMA engine
3149 * 3219 *
3150 * @rdev: radeon_device pointer 3220 * @rdev: radeon_device pointer
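For reference, a minimal C sketch (not part of the commit) of the chunking arithmetic used by r600_copy_cpdma() above: the transfer is split into CP DMA packets of at most 0x1fffff bytes, and only the final packet requests a CP sync. emit_cp_dma() is a made-up stand-in for writing the PACKET3_CP_DMA commands to the ring.

#define CP_DMA_MAX_BYTES 0x1fffff

void emit_cp_dma(unsigned long long src, unsigned long long dst,
                 unsigned int bytes, int last_chunk);

static void copy_in_chunks(unsigned long long src, unsigned long long dst,
                           unsigned int size_in_bytes)
{
        unsigned int num_loops = (size_in_bytes + CP_DMA_MAX_BYTES - 1) /
                                 CP_DMA_MAX_BYTES;  /* DIV_ROUND_UP */
        unsigned int i, cur;

        for (i = 0; i < num_loops; i++) {
                cur = size_in_bytes;
                if (cur > CP_DMA_MAX_BYTES)
                        cur = CP_DMA_MAX_BYTES;
                size_in_bytes -= cur;

                /* Only the final packet sets the CP_SYNC bit in the real code. */
                emit_cp_dma(src, dst, cur, size_in_bytes == 0);

                src += cur;
                dst += cur;
        }
}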
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e73b2a73494a..f48240bb8c56 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -266,6 +266,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
266 uint32_t offset; 266 uint32_t offset;
267 ssize_t err; 267 ssize_t err;
268 268
269 if (!dig || !dig->afmt)
270 return;
271
269 /* Silent, r600_hdmi_enable will raise WARN for us */ 272 /* Silent, r600_hdmi_enable will raise WARN for us */
270 if (!dig->afmt->enabled) 273 if (!dig->afmt->enabled)
271 return; 274 return;
@@ -448,6 +451,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
448 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 451 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
449 u32 hdmi = HDMI0_ERROR_ACK; 452 u32 hdmi = HDMI0_ERROR_ACK;
450 453
454 if (!dig || !dig->afmt)
455 return;
456
451 /* Silent, r600_hdmi_enable will raise WARN for us */ 457 /* Silent, r600_hdmi_enable will raise WARN for us */
452 if (enable && dig->afmt->enabled) 458 if (enable && dig->afmt->enabled)
453 return; 459 return;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f1b3084d8f51..8e3fe815edab 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -602,6 +602,7 @@
602#define L2_BUSY (1 << 0) 602#define L2_BUSY (1 << 0)
603 603
604#define WAIT_UNTIL 0x8040 604#define WAIT_UNTIL 0x8040
605#define WAIT_CP_DMA_IDLE_bit (1 << 8)
605#define WAIT_2D_IDLE_bit (1 << 14) 606#define WAIT_2D_IDLE_bit (1 << 14)
606#define WAIT_3D_IDLE_bit (1 << 15) 607#define WAIT_3D_IDLE_bit (1 << 15)
607#define WAIT_2D_IDLECLEAN_bit (1 << 16) 608#define WAIT_2D_IDLECLEAN_bit (1 << 16)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9b7025d02cd0..2f08219c39b6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -97,6 +97,7 @@ extern int radeon_msi;
97extern int radeon_lockup_timeout; 97extern int radeon_lockup_timeout;
98extern int radeon_fastfb; 98extern int radeon_fastfb;
99extern int radeon_dpm; 99extern int radeon_dpm;
100extern int radeon_aspm;
100 101
101/* 102/*
102 * Copy from radeon_drv.h so we don't have to include both and have conflicting 103 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -455,6 +456,7 @@ struct radeon_sa_manager {
455 uint64_t gpu_addr; 456 uint64_t gpu_addr;
456 void *cpu_ptr; 457 void *cpu_ptr;
457 uint32_t domain; 458 uint32_t domain;
459 uint32_t align;
458}; 460};
459 461
460struct radeon_sa_bo; 462struct radeon_sa_bo;
@@ -783,6 +785,11 @@ struct radeon_mec {
783/* number of entries in page table */ 785/* number of entries in page table */
784#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) 786#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
785 787
788/* PTBs (Page Table Blocks) need to be aligned to 32K */
789#define RADEON_VM_PTB_ALIGN_SIZE 32768
790#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
791#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
792
786struct radeon_vm { 793struct radeon_vm {
787 struct list_head list; 794 struct list_head list;
788 struct list_head va; 795 struct list_head va;
@@ -1460,6 +1467,8 @@ struct radeon_uvd {
1460 struct radeon_bo *vcpu_bo; 1467 struct radeon_bo *vcpu_bo;
1461 void *cpu_addr; 1468 void *cpu_addr;
1462 uint64_t gpu_addr; 1469 uint64_t gpu_addr;
1470 void *saved_bo;
1471 unsigned fw_size;
1463 atomic_t handles[RADEON_MAX_UVD_HANDLES]; 1472 atomic_t handles[RADEON_MAX_UVD_HANDLES];
1464 struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; 1473 struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
1465 struct delayed_work idle_work; 1474 struct delayed_work idle_work;
@@ -2054,7 +2063,6 @@ struct radeon_device {
2054 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 2063 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
2055 const struct firmware *mc_fw; /* NI MC firmware */ 2064 const struct firmware *mc_fw; /* NI MC firmware */
2056 const struct firmware *ce_fw; /* SI CE firmware */ 2065 const struct firmware *ce_fw; /* SI CE firmware */
2057 const struct firmware *uvd_fw; /* UVD firmware */
2058 const struct firmware *mec_fw; /* CIK MEC firmware */ 2066 const struct firmware *mec_fw; /* CIK MEC firmware */
2059 const struct firmware *sdma_fw; /* CIK SDMA firmware */ 2067 const struct firmware *sdma_fw; /* CIK SDMA firmware */
2060 const struct firmware *smc_fw; /* SMC firmware */ 2068 const struct firmware *smc_fw; /* SMC firmware */
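A quick worked example (not part of the commit) of the round-up arithmetic behind the RADEON_VM_PTB_ALIGN() macro added to radeon.h above; the PTB_* names below are local to this sketch.

#define PTB_ALIGN_SIZE 32768u
#define PTB_ALIGN_MASK (PTB_ALIGN_SIZE - 1)
#define PTB_ALIGN(a)   (((a) + PTB_ALIGN_MASK) & ~PTB_ALIGN_MASK)

/* Examples: PTB_ALIGN(0x00001) == 0x08000, PTB_ALIGN(0x08000) == 0x08000,
 * PTB_ALIGN(0x11000) == 0x18000 - page table blocks always start on a
 * 32K boundary.
 */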
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 097077499cc6..78bec1a58ed1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1026,8 +1026,8 @@ static struct radeon_asic r600_asic = {
1026 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1026 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1027 .dma = &r600_copy_dma, 1027 .dma = &r600_copy_dma,
1028 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1028 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1029 .copy = &r600_copy_dma, 1029 .copy = &r600_copy_cpdma,
1030 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1030 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1031 }, 1031 },
1032 .surface = { 1032 .surface = {
1033 .set_reg = r600_set_surface_reg, 1033 .set_reg = r600_set_surface_reg,
@@ -1119,8 +1119,8 @@ static struct radeon_asic rv6xx_asic = {
1119 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1119 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1120 .dma = &r600_copy_dma, 1120 .dma = &r600_copy_dma,
1121 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1121 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1122 .copy = &r600_copy_dma, 1122 .copy = &r600_copy_cpdma,
1123 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1123 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1124 }, 1124 },
1125 .surface = { 1125 .surface = {
1126 .set_reg = r600_set_surface_reg, 1126 .set_reg = r600_set_surface_reg,
@@ -1229,8 +1229,8 @@ static struct radeon_asic rs780_asic = {
1229 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1229 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1230 .dma = &r600_copy_dma, 1230 .dma = &r600_copy_dma,
1231 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 1231 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1232 .copy = &r600_copy_dma, 1232 .copy = &r600_copy_cpdma,
1233 .copy_ring_index = R600_RING_TYPE_DMA_INDEX, 1233 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1234 }, 1234 },
1235 .surface = { 1235 .surface = {
1236 .set_reg = r600_set_surface_reg, 1236 .set_reg = r600_set_surface_reg,
@@ -1270,6 +1270,7 @@ static struct radeon_asic rs780_asic = {
1270 .get_sclk = &rs780_dpm_get_sclk, 1270 .get_sclk = &rs780_dpm_get_sclk,
1271 .get_mclk = &rs780_dpm_get_mclk, 1271 .get_mclk = &rs780_dpm_get_mclk,
1272 .print_power_state = &rs780_dpm_print_power_state, 1272 .print_power_state = &rs780_dpm_print_power_state,
1273 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
1273 }, 1274 },
1274 .pflip = { 1275 .pflip = {
1275 .pre_page_flip = &rs600_pre_page_flip, 1276 .pre_page_flip = &rs600_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 45d0693cddd5..ca1895709908 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -340,6 +340,9 @@ int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
340int r600_copy_blit(struct radeon_device *rdev, 340int r600_copy_blit(struct radeon_device *rdev,
341 uint64_t src_offset, uint64_t dst_offset, 341 uint64_t src_offset, uint64_t dst_offset,
342 unsigned num_gpu_pages, struct radeon_fence **fence); 342 unsigned num_gpu_pages, struct radeon_fence **fence);
343int r600_copy_cpdma(struct radeon_device *rdev,
344 uint64_t src_offset, uint64_t dst_offset,
345 unsigned num_gpu_pages, struct radeon_fence **fence);
343int r600_copy_dma(struct radeon_device *rdev, 346int r600_copy_dma(struct radeon_device *rdev,
344 uint64_t src_offset, uint64_t dst_offset, 347 uint64_t src_offset, uint64_t dst_offset,
345 unsigned num_gpu_pages, struct radeon_fence **fence); 348 unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -430,6 +433,8 @@ u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low);
430u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low); 433u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low);
431void rs780_dpm_print_power_state(struct radeon_device *rdev, 434void rs780_dpm_print_power_state(struct radeon_device *rdev,
432 struct radeon_ps *ps); 435 struct radeon_ps *ps);
436void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
437 struct seq_file *m);
433 438
434/* uvd */ 439/* uvd */
435int r600_uvd_init(struct radeon_device *rdev); 440int r600_uvd_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fbdaff55556b..e3f3e8841789 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -3513,7 +3513,6 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
3513 u8 frev, crev, i; 3513 u8 frev, crev, i;
3514 u16 data_offset, size; 3514 u16 data_offset, size;
3515 union vram_info *vram_info; 3515 union vram_info *vram_info;
3516 u8 *p;
3517 3516
3518 memset(mem_info, 0, sizeof(struct atom_memory_info)); 3517 memset(mem_info, 0, sizeof(struct atom_memory_info));
3519 3518
@@ -3529,13 +3528,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
3529 if (module_index < vram_info->v1_3.ucNumOfVRAMModule) { 3528 if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
3530 ATOM_VRAM_MODULE_V3 *vram_module = 3529 ATOM_VRAM_MODULE_V3 *vram_module =
3531 (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo; 3530 (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
3532 p = (u8 *)vram_info->v1_3.aVramInfo;
3533 3531
3534 for (i = 0; i < module_index; i++) { 3532 for (i = 0; i < module_index; i++) {
3535 vram_module = (ATOM_VRAM_MODULE_V3 *)p;
3536 if (le16_to_cpu(vram_module->usSize) == 0) 3533 if (le16_to_cpu(vram_module->usSize) == 0)
3537 return -EINVAL; 3534 return -EINVAL;
3538 p += le16_to_cpu(vram_module->usSize); 3535 vram_module = (ATOM_VRAM_MODULE_V3 *)
3536 ((u8 *)vram_module + le16_to_cpu(vram_module->usSize));
3539 } 3537 }
3540 mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf; 3538 mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
3541 mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0; 3539 mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
@@ -3547,13 +3545,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
3547 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { 3545 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3548 ATOM_VRAM_MODULE_V4 *vram_module = 3546 ATOM_VRAM_MODULE_V4 *vram_module =
3549 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; 3547 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3550 p = (u8 *)vram_info->v1_4.aVramInfo;
3551 3548
3552 for (i = 0; i < module_index; i++) { 3549 for (i = 0; i < module_index; i++) {
3553 vram_module = (ATOM_VRAM_MODULE_V4 *)p;
3554 if (le16_to_cpu(vram_module->usModuleSize) == 0) 3550 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3555 return -EINVAL; 3551 return -EINVAL;
3556 p += le16_to_cpu(vram_module->usModuleSize); 3552 vram_module = (ATOM_VRAM_MODULE_V4 *)
3553 ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
3557 } 3554 }
3558 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; 3555 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
3559 mem_info->mem_type = vram_module->ucMemoryType & 0xf0; 3556 mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
@@ -3572,13 +3569,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev,
3572 if (module_index < vram_info->v2_1.ucNumOfVRAMModule) { 3569 if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
3573 ATOM_VRAM_MODULE_V7 *vram_module = 3570 ATOM_VRAM_MODULE_V7 *vram_module =
3574 (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo; 3571 (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
3575 p = (u8 *)vram_info->v2_1.aVramInfo;
3576 3572
3577 for (i = 0; i < module_index; i++) { 3573 for (i = 0; i < module_index; i++) {
3578 vram_module = (ATOM_VRAM_MODULE_V7 *)p;
3579 if (le16_to_cpu(vram_module->usModuleSize) == 0) 3574 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3580 return -EINVAL; 3575 return -EINVAL;
3581 p += le16_to_cpu(vram_module->usModuleSize); 3576 vram_module = (ATOM_VRAM_MODULE_V7 *)
3577 ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
3582 } 3578 }
3583 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; 3579 mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
3584 mem_info->mem_type = vram_module->ucMemoryType & 0xf0; 3580 mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
@@ -3628,21 +3624,19 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
3628 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { 3624 if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
3629 ATOM_VRAM_MODULE_V4 *vram_module = 3625 ATOM_VRAM_MODULE_V4 *vram_module =
3630 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; 3626 (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
3631 ATOM_MEMORY_TIMING_FORMAT *format;
3632 p = (u8 *)vram_info->v1_4.aVramInfo;
3633 3627
3634 for (i = 0; i < module_index; i++) { 3628 for (i = 0; i < module_index; i++) {
3635 vram_module = (ATOM_VRAM_MODULE_V4 *)p;
3636 if (le16_to_cpu(vram_module->usModuleSize) == 0) 3629 if (le16_to_cpu(vram_module->usModuleSize) == 0)
3637 return -EINVAL; 3630 return -EINVAL;
3638 p += le16_to_cpu(vram_module->usModuleSize); 3631 vram_module = (ATOM_VRAM_MODULE_V4 *)
3632 ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
3639 } 3633 }
3640 mclk_range_table->num_entries = (u8) 3634 mclk_range_table->num_entries = (u8)
3641 ((vram_module->usModuleSize - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / 3635 ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
3642 mem_timing_size); 3636 mem_timing_size);
3643 p = (u8 *)vram_module->asMemTiming; 3637 p = (u8 *)&vram_module->asMemTiming[0];
3644 for (i = 0; i < mclk_range_table->num_entries; i++) { 3638 for (i = 0; i < mclk_range_table->num_entries; i++) {
3645 format = (ATOM_MEMORY_TIMING_FORMAT *)p; 3639 ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p;
3646 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange); 3640 mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
3647 p += mem_timing_size; 3641 p += mem_timing_size;
3648 } 3642 }
@@ -3705,17 +3699,21 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
3705 (ATOM_MEMORY_SETTING_DATA_BLOCK *) 3699 (ATOM_MEMORY_SETTING_DATA_BLOCK *)
3706 ((u8 *)reg_block + (2 * sizeof(u16)) + 3700 ((u8 *)reg_block + (2 * sizeof(u16)) +
3707 le16_to_cpu(reg_block->usRegIndexTblSize)); 3701 le16_to_cpu(reg_block->usRegIndexTblSize));
3702 ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
3708 num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) / 3703 num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
3709 sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1; 3704 sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
3710 if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE) 3705 if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
3711 return -EINVAL; 3706 return -EINVAL;
3712 while (!(reg_block->asRegIndexBuf[i].ucPreRegDataLength & ACCESS_PLACEHOLDER) && 3707 while (i < num_entries) {
3713 (i < num_entries)) { 3708 if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
3709 break;
3714 reg_table->mc_reg_address[i].s1 = 3710 reg_table->mc_reg_address[i].s1 =
3715 (u16)(le16_to_cpu(reg_block->asRegIndexBuf[i].usRegIndex)); 3711 (u16)(le16_to_cpu(format->usRegIndex));
3716 reg_table->mc_reg_address[i].pre_reg_data = 3712 reg_table->mc_reg_address[i].pre_reg_data =
3717 (u8)(reg_block->asRegIndexBuf[i].ucPreRegDataLength); 3713 (u8)(format->ucPreRegDataLength);
3718 i++; 3714 i++;
3715 format = (ATOM_INIT_REG_INDEX_FORMAT *)
3716 ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
3719 } 3717 }
3720 reg_table->last = i; 3718 reg_table->last = i;
3721 while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) && 3719 while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
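Editor's note: the radeon_atombios.c changes above drop the separate u8 cursor, re-derive each next module pointer from the current one, check the size before advancing, and add the missing le16_to_cpu when computing num_entries from usModuleSize. A minimal sketch of that walk over variable-length, little-endian-sized records (struct record and record_next are hypothetical; only the pattern matches the patch):

	/* a variable-length record whose first field is its own
	 * little-endian size in bytes, as in the ATOM vram module tables */
	struct record {
		__le16 size;
		u8 payload[];
	};

	static struct record *record_next(struct record *rec)
	{
		u16 len = le16_to_cpu(rec->size);

		if (len == 0)		/* malformed table, stop walking */
			return NULL;
		return (struct record *)((u8 *)rec + len);
	}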
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e5419b350170..29876b1be8ec 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -167,6 +167,7 @@ int radeon_msi = -1;
167int radeon_lockup_timeout = 10000; 167int radeon_lockup_timeout = 10000;
168int radeon_fastfb = 0; 168int radeon_fastfb = 0;
169int radeon_dpm = -1; 169int radeon_dpm = -1;
170int radeon_aspm = -1;
170 171
171MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 172MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
172module_param_named(no_wb, radeon_no_wb, int, 0444); 173module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -225,6 +226,9 @@ module_param_named(fastfb, radeon_fastfb, int, 0444);
225MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); 226MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
226module_param_named(dpm, radeon_dpm, int, 0444); 227module_param_named(dpm, radeon_dpm, int, 0444);
227 228
229MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
230module_param_named(aspm, radeon_aspm, int, 0444);
231
228static struct pci_device_id pciidlist[] = { 232static struct pci_device_id pciidlist[] = {
229 radeon_PCI_IDS 233 radeon_PCI_IDS
230}; 234};
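Editor's note: the new option is registered with module_param_named() and mode 0444, so it is read-only at runtime and has to be set when the driver is loaded. Hedged usage examples (standard module-parameter syntax, not taken from this patch):

	# radeon built as a module
	modprobe radeon aspm=0

	# radeon built in: kernel command line
	radeon.aspm=0

The rv6xx_dpm.c, rv770_dpm.c and si.c hunks below consult radeon_aspm before enabling any L0s/L1 ASPM feature, with 0 meaning "leave ASPM off" and -1 (the default) meaning "auto".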
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e04eb5..7ddb0efe2408 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
782 782
783 } else { 783 } else {
784 /* put fence directly behind firmware */ 784 /* put fence directly behind firmware */
785 index = ALIGN(rdev->uvd_fw->size, 8); 785 index = ALIGN(rdev->uvd.fw_size, 8);
786 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; 786 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
787 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; 787 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
788 } 788 }
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 43ec4a401f07..d9d31a383276 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -466,7 +466,8 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
466 size += rdev->vm_manager.max_pfn * 8; 466 size += rdev->vm_manager.max_pfn * 8;
467 size *= 2; 467 size *= 2;
468 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, 468 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
469 RADEON_GPU_PAGE_ALIGN(size), 469 RADEON_VM_PTB_ALIGN(size),
470 RADEON_VM_PTB_ALIGN_SIZE,
470 RADEON_GEM_DOMAIN_VRAM); 471 RADEON_GEM_DOMAIN_VRAM);
471 if (r) { 472 if (r) {
472 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", 473 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -620,10 +621,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
620 } 621 }
621 622
622retry: 623retry:
623 pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); 624 pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev));
624 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 625 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
625 &vm->page_directory, pd_size, 626 &vm->page_directory, pd_size,
626 RADEON_GPU_PAGE_SIZE, false); 627 RADEON_VM_PTB_ALIGN_SIZE, false);
627 if (r == -ENOMEM) { 628 if (r == -ENOMEM) {
628 r = radeon_vm_evict(rdev, vm); 629 r = radeon_vm_evict(rdev, vm);
629 if (r) 630 if (r)
@@ -952,8 +953,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
952retry: 953retry:
953 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, 954 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
954 &vm->page_tables[pt_idx], 955 &vm->page_tables[pt_idx],
955 RADEON_VM_PTE_COUNT * 8, 956 RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8),
956 RADEON_GPU_PAGE_SIZE, false); 957 RADEON_VM_PTB_ALIGN_SIZE, false);
957 958
958 if (r == -ENOMEM) { 959 if (r == -ENOMEM) {
959 r = radeon_vm_evict(rdev, vm); 960 r = radeon_vm_evict(rdev, vm);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index bcdefd1dcd43..081886b0642d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -260,10 +260,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
260{ 260{
261 int r = 0; 261 int r = 0;
262 262
263 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
264 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
265 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
266
267 spin_lock_init(&rdev->irq.lock); 263 spin_lock_init(&rdev->irq.lock);
268 r = drm_vblank_init(rdev->ddev, rdev->num_crtc); 264 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
269 if (r) { 265 if (r) {
@@ -285,6 +281,11 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
285 rdev->irq.installed = false; 281 rdev->irq.installed = false;
286 return r; 282 return r;
287 } 283 }
284
285 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
286 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
287 INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
288
288 DRM_INFO("radeon: irq initialized.\n"); 289 DRM_INFO("radeon: irq initialized.\n");
289 return 0; 290 return 0;
290} 291}
@@ -304,8 +305,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
304 rdev->irq.installed = false; 305 rdev->irq.installed = false;
305 if (rdev->msi_enabled) 306 if (rdev->msi_enabled)
306 pci_disable_msi(rdev->pdev); 307 pci_disable_msi(rdev->pdev);
308 flush_work(&rdev->hotplug_work);
307 } 309 }
308 flush_work(&rdev->hotplug_work);
309} 310}
310 311
311/** 312/**
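Editor's note: the reordering above creates the hotplug/audio/reset work items only after the steps that can fail and return early, and moves flush_work() under the same rdev->irq.installed condition that guarantees they were initialized. A generalized sketch of that ordering (all foo_* names are hypothetical):

	static int foo_irq_init(struct foo_dev *fdev)
	{
		int r = foo_vblank_init(fdev);	/* may fail */
		if (r)
			return r;		/* no work item exists yet */

		INIT_WORK(&fdev->hotplug_work, foo_hotplug_func);
		fdev->irq_installed = true;
		return 0;
	}

	static void foo_irq_fini(struct foo_dev *fdev)
	{
		if (fdev->irq_installed)	/* only flush what was set up */
			flush_work(&fdev->hotplug_work);
	}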
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0219d263e2df..2020bf4a3830 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -377,6 +377,7 @@ int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
377 domain = lobj->alt_domain; 377 domain = lobj->alt_domain;
378 goto retry; 378 goto retry;
379 } 379 }
380 ttm_eu_backoff_reservation(ticket, head);
380 return r; 381 return r;
381 } 382 }
382 } 383 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 91519a5622b4..49c82c480013 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -174,7 +174,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
174 174
175extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, 175extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
176 struct radeon_sa_manager *sa_manager, 176 struct radeon_sa_manager *sa_manager,
177 unsigned size, u32 domain); 177 unsigned size, u32 align, u32 domain);
178extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev, 178extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
179 struct radeon_sa_manager *sa_manager); 179 struct radeon_sa_manager *sa_manager);
180extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, 180extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 5f1c51a776ed..fb5ea6208970 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -224,6 +224,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
224 } 224 }
225 r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, 225 r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
226 RADEON_IB_POOL_SIZE*64*1024, 226 RADEON_IB_POOL_SIZE*64*1024,
227 RADEON_GPU_PAGE_SIZE,
227 RADEON_GEM_DOMAIN_GTT); 228 RADEON_GEM_DOMAIN_GTT);
228 if (r) { 229 if (r) {
229 return r; 230 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 0abe5a9431bb..f0bac68254b7 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
49 49
50int radeon_sa_bo_manager_init(struct radeon_device *rdev, 50int radeon_sa_bo_manager_init(struct radeon_device *rdev,
51 struct radeon_sa_manager *sa_manager, 51 struct radeon_sa_manager *sa_manager,
52 unsigned size, u32 domain) 52 unsigned size, u32 align, u32 domain)
53{ 53{
54 int i, r; 54 int i, r;
55 55
@@ -57,13 +57,14 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
57 sa_manager->bo = NULL; 57 sa_manager->bo = NULL;
58 sa_manager->size = size; 58 sa_manager->size = size;
59 sa_manager->domain = domain; 59 sa_manager->domain = domain;
60 sa_manager->align = align;
60 sa_manager->hole = &sa_manager->olist; 61 sa_manager->hole = &sa_manager->olist;
61 INIT_LIST_HEAD(&sa_manager->olist); 62 INIT_LIST_HEAD(&sa_manager->olist);
62 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 63 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
63 INIT_LIST_HEAD(&sa_manager->flist[i]); 64 INIT_LIST_HEAD(&sa_manager->flist[i]);
64 } 65 }
65 66
66 r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, 67 r = radeon_bo_create(rdev, size, align, true,
67 domain, NULL, &sa_manager->bo); 68 domain, NULL, &sa_manager->bo);
68 if (r) { 69 if (r) {
69 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); 70 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
@@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
317 unsigned tries[RADEON_NUM_RINGS]; 318 unsigned tries[RADEON_NUM_RINGS];
318 int i, r; 319 int i, r;
319 320
320 BUG_ON(align > RADEON_GPU_PAGE_SIZE); 321 BUG_ON(align > sa_manager->align);
321 BUG_ON(size > sa_manager->size); 322 BUG_ON(size > sa_manager->size);
322 323
323 *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL); 324 *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 41efcec28cd8..414fd145d20e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -56,20 +56,13 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
56 56
57int radeon_uvd_init(struct radeon_device *rdev) 57int radeon_uvd_init(struct radeon_device *rdev)
58{ 58{
59 struct platform_device *pdev; 59 const struct firmware *fw;
60 unsigned long bo_size; 60 unsigned long bo_size;
61 const char *fw_name; 61 const char *fw_name;
62 int i, r; 62 int i, r;
63 63
64 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); 64 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
65 65
66 pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
67 r = IS_ERR(pdev);
68 if (r) {
69 dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
70 return -EINVAL;
71 }
72
73 switch (rdev->family) { 66 switch (rdev->family) {
74 case CHIP_RV710: 67 case CHIP_RV710:
75 case CHIP_RV730: 68 case CHIP_RV730:
@@ -112,17 +105,14 @@ int radeon_uvd_init(struct radeon_device *rdev)
112 return -EINVAL; 105 return -EINVAL;
113 } 106 }
114 107
115 r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev); 108 r = request_firmware(&fw, fw_name, rdev->dev);
116 if (r) { 109 if (r) {
117 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", 110 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
118 fw_name); 111 fw_name);
119 platform_device_unregister(pdev);
120 return r; 112 return r;
121 } 113 }
122 114
123 platform_device_unregister(pdev); 115 bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
124
125 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
126 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; 116 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
127 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, 117 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
128 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); 118 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -131,16 +121,35 @@ int radeon_uvd_init(struct radeon_device *rdev)
131 return r; 121 return r;
132 } 122 }
133 123
134 r = radeon_uvd_resume(rdev); 124 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
135 if (r) 125 if (r) {
126 radeon_bo_unref(&rdev->uvd.vcpu_bo);
127 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
136 return r; 128 return r;
129 }
137 130
138 memset(rdev->uvd.cpu_addr, 0, bo_size); 131 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
139 memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); 132 &rdev->uvd.gpu_addr);
133 if (r) {
134 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
135 radeon_bo_unref(&rdev->uvd.vcpu_bo);
136 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
137 return r;
138 }
140 139
141 r = radeon_uvd_suspend(rdev); 140 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
142 if (r) 141 if (r) {
142 dev_err(rdev->dev, "(%d) UVD map failed\n", r);
143 return r; 143 return r;
144 }
145
146 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
147
148 rdev->uvd.fw_size = fw->size;
149 memset(rdev->uvd.cpu_addr, 0, bo_size);
150 memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
151
152 release_firmware(fw);
144 153
145 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { 154 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
146 atomic_set(&rdev->uvd.handles[i], 0); 155 atomic_set(&rdev->uvd.handles[i], 0);
@@ -152,71 +161,47 @@ int radeon_uvd_init(struct radeon_device *rdev)
152 161
153void radeon_uvd_fini(struct radeon_device *rdev) 162void radeon_uvd_fini(struct radeon_device *rdev)
154{ 163{
155 radeon_uvd_suspend(rdev);
156 radeon_bo_unref(&rdev->uvd.vcpu_bo);
157}
158
159int radeon_uvd_suspend(struct radeon_device *rdev)
160{
161 int r; 164 int r;
162 165
163 if (rdev->uvd.vcpu_bo == NULL) 166 if (rdev->uvd.vcpu_bo == NULL)
164 return 0; 167 return;
165 168
166 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); 169 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
167 if (!r) { 170 if (!r) {
168 radeon_bo_kunmap(rdev->uvd.vcpu_bo); 171 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
169 radeon_bo_unpin(rdev->uvd.vcpu_bo); 172 radeon_bo_unpin(rdev->uvd.vcpu_bo);
170 rdev->uvd.cpu_addr = NULL;
171 if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
172 radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
173 }
174 radeon_bo_unreserve(rdev->uvd.vcpu_bo); 173 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
175
176 if (rdev->uvd.cpu_addr) {
177 radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
178 } else {
179 rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
180 }
181 } 174 }
182 return r; 175
176 radeon_bo_unref(&rdev->uvd.vcpu_bo);
183} 177}
184 178
185int radeon_uvd_resume(struct radeon_device *rdev) 179int radeon_uvd_suspend(struct radeon_device *rdev)
186{ 180{
187 int r; 181 unsigned size;
188 182
189 if (rdev->uvd.vcpu_bo == NULL) 183 if (rdev->uvd.vcpu_bo == NULL)
190 return -EINVAL; 184 return 0;
191 185
192 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); 186 size = radeon_bo_size(rdev->uvd.vcpu_bo);
193 if (r) { 187 rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
194 radeon_bo_unref(&rdev->uvd.vcpu_bo); 188 memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
195 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
196 return r;
197 }
198 189
199 /* Have been pin in cpu unmap unpin */ 190 return 0;
200 radeon_bo_kunmap(rdev->uvd.vcpu_bo); 191}
201 radeon_bo_unpin(rdev->uvd.vcpu_bo);
202 192
203 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, 193int radeon_uvd_resume(struct radeon_device *rdev)
204 &rdev->uvd.gpu_addr); 194{
205 if (r) { 195 if (rdev->uvd.vcpu_bo == NULL)
206 radeon_bo_unreserve(rdev->uvd.vcpu_bo); 196 return -EINVAL;
207 radeon_bo_unref(&rdev->uvd.vcpu_bo);
208 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
209 return r;
210 }
211 197
212 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); 198 if (rdev->uvd.saved_bo != NULL) {
213 if (r) { 199 unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
214 dev_err(rdev->dev, "(%d) UVD map failed\n", r); 200 memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
215 return r; 201 kfree(rdev->uvd.saved_bo);
202 rdev->uvd.saved_bo = NULL;
216 } 203 }
217 204
218 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
219
220 return 0; 205 return 0;
221} 206}
222 207
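Editor's note: with this rework the UVD VCPU buffer object stays pinned in VRAM for the lifetime of the device; the firmware image is copied in once at init and released, and suspend/resume snapshots the BO contents into a kernel buffer instead of unpinning and re-pinning it. A generalized sketch of that save/restore (foo_* names are hypothetical; unlike this sketch, the patch above does not check the kmalloc result):

	static int foo_uvd_suspend(struct foo_dev *fdev)
	{
		unsigned size = foo_bo_size(fdev->vcpu_bo);

		fdev->saved_bo = kmalloc(size, GFP_KERNEL);
		if (!fdev->saved_bo)
			return -ENOMEM;
		memcpy(fdev->saved_bo, fdev->vcpu_cpu_addr, size);
		return 0;
	}

	static int foo_uvd_resume(struct foo_dev *fdev)
	{
		if (fdev->saved_bo) {
			memcpy(fdev->vcpu_cpu_addr, fdev->saved_bo,
			       foo_bo_size(fdev->vcpu_bo));
			kfree(fdev->saved_bo);
			fdev->saved_bo = NULL;
		}
		return 0;
	}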
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index bef832a62fee..d1a1ce73bd45 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -28,6 +28,7 @@
28#include "r600_dpm.h" 28#include "r600_dpm.h"
29#include "rs780_dpm.h" 29#include "rs780_dpm.h"
30#include "atom.h" 30#include "atom.h"
31#include <linux/seq_file.h>
31 32
32static struct igp_ps *rs780_get_ps(struct radeon_ps *rps) 33static struct igp_ps *rs780_get_ps(struct radeon_ps *rps)
33{ 34{
@@ -961,3 +962,27 @@ u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low)
961 962
962 return pi->bootup_uma_clk; 963 return pi->bootup_uma_clk;
963} 964}
965
966void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
967 struct seq_file *m)
968{
969 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
970 struct igp_ps *ps = rs780_get_ps(rps);
971 u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK;
972 u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
973 u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1;
974 u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 +
975 ((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1;
976 u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) /
977 (post_div * ref_div);
978
979 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
980
981 /* guess based on the current sclk */
982 if (sclk < (ps->sclk_low + 500))
983 seq_printf(m, "power level 0 sclk: %u vddc_index: %d\n",
984 ps->sclk_low, ps->min_voltage);
985 else
986 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
987 ps->sclk_high, ps->max_voltage);
988}
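Editor's note: the debugfs helper above derives the current engine clock from the SPLL settings as sclk = reference_freq * current_fb_div / (post_div * ref_div), with ref_div and post_div rebuilt from the CG_SPLL_FUNC_CNTL fields (the register encodes each divider component minus one, hence the +1 terms). A tiny helper with the same arithmetic; the sample numbers are made up and the result is in whatever unit reference_freq uses:

	static u32 spll_current_sclk(u32 reference_freq, u32 fb_div,
				     u32 ref_div, u32 post_div)
	{
		return (reference_freq * fb_div) / (post_div * ref_div);
	}

	/* e.g. reference_freq = 2500, fb_div = 160, ref_div = 2, post_div = 2
	 *   -> 2500 * 160 / (2 * 2) = 100000
	 */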
diff --git a/drivers/gpu/drm/radeon/rs780d.h b/drivers/gpu/drm/radeon/rs780d.h
index b1142ed1c628..cfbe9a43d97b 100644
--- a/drivers/gpu/drm/radeon/rs780d.h
+++ b/drivers/gpu/drm/radeon/rs780d.h
@@ -28,6 +28,7 @@
28# define SPLL_SLEEP (1 << 1) 28# define SPLL_SLEEP (1 << 1)
29# define SPLL_REF_DIV(x) ((x) << 2) 29# define SPLL_REF_DIV(x) ((x) << 2)
30# define SPLL_REF_DIV_MASK (7 << 2) 30# define SPLL_REF_DIV_MASK (7 << 2)
31# define SPLL_REF_DIV_SHIFT 2
31# define SPLL_FB_DIV(x) ((x) << 5) 32# define SPLL_FB_DIV(x) ((x) << 5)
32# define SPLL_FB_DIV_MASK (0xff << 2) 33# define SPLL_FB_DIV_MASK (0xff << 2)
33# define SPLL_FB_DIV_SHIFT 2 34# define SPLL_FB_DIV_SHIFT 2
@@ -36,8 +37,10 @@
36# define SPLL_PULSENUM_MASK (3 << 14) 37# define SPLL_PULSENUM_MASK (3 << 14)
37# define SPLL_SW_HILEN(x) ((x) << 16) 38# define SPLL_SW_HILEN(x) ((x) << 16)
38# define SPLL_SW_HILEN_MASK (0xf << 16) 39# define SPLL_SW_HILEN_MASK (0xf << 16)
40# define SPLL_SW_HILEN_SHIFT 16
39# define SPLL_SW_LOLEN(x) ((x) << 20) 41# define SPLL_SW_LOLEN(x) ((x) << 20)
40# define SPLL_SW_LOLEN_MASK (0xf << 20) 42# define SPLL_SW_LOLEN_MASK (0xf << 20)
43# define SPLL_SW_LOLEN_SHIFT 20
41# define SPLL_DIVEN (1 << 24) 44# define SPLL_DIVEN (1 << 24)
42# define SPLL_BYPASS_EN (1 << 25) 45# define SPLL_BYPASS_EN (1 << 25)
43# define SPLL_CHG_STATUS (1 << 29) 46# define SPLL_CHG_STATUS (1 << 29)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 8303de267ee5..65e33f387341 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1763,12 +1763,14 @@ void rv6xx_setup_asic(struct radeon_device *rdev)
1763{ 1763{
1764 r600_enable_acpi_pm(rdev); 1764 r600_enable_acpi_pm(rdev);
1765 1765
1766 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) 1766 if (radeon_aspm != 0) {
1767 rv6xx_enable_l0s(rdev); 1767 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
1768 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) 1768 rv6xx_enable_l0s(rdev);
1769 rv6xx_enable_l1(rdev); 1769 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
1770 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) 1770 rv6xx_enable_l1(rdev);
1771 rv6xx_enable_pll_sleep_in_l1(rdev); 1771 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
1772 rv6xx_enable_pll_sleep_in_l1(rdev);
1773 }
1772} 1774}
1773 1775
1774void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev) 1776void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4a62ad2e5399..30ea14e8854c 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev)
813 813
814 /* programm the VCPU memory controller bits 0-27 */ 814 /* programm the VCPU memory controller bits 0-27 */
815 addr = rdev->uvd.gpu_addr >> 3; 815 addr = rdev->uvd.gpu_addr >> 3;
816 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; 816 size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
817 WREG32(UVD_VCPU_CACHE_OFFSET0, addr); 817 WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
818 WREG32(UVD_VCPU_CACHE_SIZE0, size); 818 WREG32(UVD_VCPU_CACHE_SIZE0, size);
819 819
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index d914e04ea39a..2d347925f77d 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2099,12 +2099,14 @@ void rv770_dpm_setup_asic(struct radeon_device *rdev)
2099 2099
2100 rv770_enable_acpi_pm(rdev); 2100 rv770_enable_acpi_pm(rdev);
2101 2101
2102 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) 2102 if (radeon_aspm != 0) {
2103 rv770_enable_l0s(rdev); 2103 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
2104 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) 2104 rv770_enable_l0s(rdev);
2105 rv770_enable_l1(rdev); 2105 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
2106 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) 2106 rv770_enable_l1(rdev);
2107 rv770_enable_pll_sleep_in_l1(rdev); 2107 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
2108 rv770_enable_pll_sleep_in_l1(rdev);
2109 }
2108} 2110}
2109 2111
2110void rv770_dpm_display_configuration_changed(struct radeon_device *rdev) 2112void rv770_dpm_display_configuration_changed(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 234906709067..d325280e2f9f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -22,7 +22,6 @@
22 * Authors: Alex Deucher 22 * Authors: Alex Deucher
23 */ 23 */
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <drm/drmP.h> 27#include <drm/drmP.h>
@@ -1541,7 +1540,6 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
1541 1540
1542static int si_init_microcode(struct radeon_device *rdev) 1541static int si_init_microcode(struct radeon_device *rdev)
1543{ 1542{
1544 struct platform_device *pdev;
1545 const char *chip_name; 1543 const char *chip_name;
1546 const char *rlc_chip_name; 1544 const char *rlc_chip_name;
1547 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; 1545 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
@@ -1551,13 +1549,6 @@ static int si_init_microcode(struct radeon_device *rdev)
1551 1549
1552 DRM_DEBUG("\n"); 1550 DRM_DEBUG("\n");
1553 1551
1554 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1555 err = IS_ERR(pdev);
1556 if (err) {
1557 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1558 return -EINVAL;
1559 }
1560
1561 switch (rdev->family) { 1552 switch (rdev->family) {
1562 case CHIP_TAHITI: 1553 case CHIP_TAHITI:
1563 chip_name = "TAHITI"; 1554 chip_name = "TAHITI";
@@ -1615,7 +1606,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1615 DRM_INFO("Loading %s Microcode\n", chip_name); 1606 DRM_INFO("Loading %s Microcode\n", chip_name);
1616 1607
1617 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 1608 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1618 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 1609 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1619 if (err) 1610 if (err)
1620 goto out; 1611 goto out;
1621 if (rdev->pfp_fw->size != pfp_req_size) { 1612 if (rdev->pfp_fw->size != pfp_req_size) {
@@ -1627,7 +1618,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1627 } 1618 }
1628 1619
1629 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 1620 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1630 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 1621 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1631 if (err) 1622 if (err)
1632 goto out; 1623 goto out;
1633 if (rdev->me_fw->size != me_req_size) { 1624 if (rdev->me_fw->size != me_req_size) {
@@ -1638,7 +1629,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1638 } 1629 }
1639 1630
1640 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); 1631 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1641 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); 1632 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1642 if (err) 1633 if (err)
1643 goto out; 1634 goto out;
1644 if (rdev->ce_fw->size != ce_req_size) { 1635 if (rdev->ce_fw->size != ce_req_size) {
@@ -1649,7 +1640,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1649 } 1640 }
1650 1641
1651 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); 1642 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1652 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 1643 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1653 if (err) 1644 if (err)
1654 goto out; 1645 goto out;
1655 if (rdev->rlc_fw->size != rlc_req_size) { 1646 if (rdev->rlc_fw->size != rlc_req_size) {
@@ -1660,7 +1651,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1660 } 1651 }
1661 1652
1662 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 1653 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1663 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); 1654 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1664 if (err) 1655 if (err)
1665 goto out; 1656 goto out;
1666 if (rdev->mc_fw->size != mc_req_size) { 1657 if (rdev->mc_fw->size != mc_req_size) {
@@ -1671,7 +1662,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1671 } 1662 }
1672 1663
1673 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 1664 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1674 err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); 1665 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1675 if (err) 1666 if (err)
1676 goto out; 1667 goto out;
1677 if (rdev->smc_fw->size != smc_req_size) { 1668 if (rdev->smc_fw->size != smc_req_size) {
@@ -1682,8 +1673,6 @@ static int si_init_microcode(struct radeon_device *rdev)
1682 } 1673 }
1683 1674
1684out: 1675out:
1685 platform_device_unregister(pdev);
1686
1687 if (err) { 1676 if (err) {
1688 if (err != -EINVAL) 1677 if (err != -EINVAL)
1689 printk(KERN_ERR 1678 printk(KERN_ERR
@@ -4401,6 +4390,270 @@ void si_vm_fini(struct radeon_device *rdev)
4401} 4390}
4402 4391
4403/** 4392/**
4393 * si_vm_decode_fault - print human readable fault info
4394 *
4395 * @rdev: radeon_device pointer
4396 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4397 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4398 *
4399 * Print human readable fault information (SI).
4400 */
4401static void si_vm_decode_fault(struct radeon_device *rdev,
4402 u32 status, u32 addr)
4403{
4404 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4405 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4406 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4407 char *block;
4408
4409 if (rdev->family == CHIP_TAHITI) {
4410 switch (mc_id) {
4411 case 160:
4412 case 144:
4413 case 96:
4414 case 80:
4415 case 224:
4416 case 208:
4417 case 32:
4418 case 16:
4419 block = "CB";
4420 break;
4421 case 161:
4422 case 145:
4423 case 97:
4424 case 81:
4425 case 225:
4426 case 209:
4427 case 33:
4428 case 17:
4429 block = "CB_FMASK";
4430 break;
4431 case 162:
4432 case 146:
4433 case 98:
4434 case 82:
4435 case 226:
4436 case 210:
4437 case 34:
4438 case 18:
4439 block = "CB_CMASK";
4440 break;
4441 case 163:
4442 case 147:
4443 case 99:
4444 case 83:
4445 case 227:
4446 case 211:
4447 case 35:
4448 case 19:
4449 block = "CB_IMMED";
4450 break;
4451 case 164:
4452 case 148:
4453 case 100:
4454 case 84:
4455 case 228:
4456 case 212:
4457 case 36:
4458 case 20:
4459 block = "DB";
4460 break;
4461 case 165:
4462 case 149:
4463 case 101:
4464 case 85:
4465 case 229:
4466 case 213:
4467 case 37:
4468 case 21:
4469 block = "DB_HTILE";
4470 break;
4471 case 167:
4472 case 151:
4473 case 103:
4474 case 87:
4475 case 231:
4476 case 215:
4477 case 39:
4478 case 23:
4479 block = "DB_STEN";
4480 break;
4481 case 72:
4482 case 68:
4483 case 64:
4484 case 8:
4485 case 4:
4486 case 0:
4487 case 136:
4488 case 132:
4489 case 128:
4490 case 200:
4491 case 196:
4492 case 192:
4493 block = "TC";
4494 break;
4495 case 112:
4496 case 48:
4497 block = "CP";
4498 break;
4499 case 49:
4500 case 177:
4501 case 50:
4502 case 178:
4503 block = "SH";
4504 break;
4505 case 53:
4506 case 190:
4507 block = "VGT";
4508 break;
4509 case 117:
4510 block = "IH";
4511 break;
4512 case 51:
4513 case 115:
4514 block = "RLC";
4515 break;
4516 case 119:
4517 case 183:
4518 block = "DMA0";
4519 break;
4520 case 61:
4521 block = "DMA1";
4522 break;
4523 case 248:
4524 case 120:
4525 block = "HDP";
4526 break;
4527 default:
4528 block = "unknown";
4529 break;
4530 }
4531 } else {
4532 switch (mc_id) {
4533 case 32:
4534 case 16:
4535 case 96:
4536 case 80:
4537 case 160:
4538 case 144:
4539 case 224:
4540 case 208:
4541 block = "CB";
4542 break;
4543 case 33:
4544 case 17:
4545 case 97:
4546 case 81:
4547 case 161:
4548 case 145:
4549 case 225:
4550 case 209:
4551 block = "CB_FMASK";
4552 break;
4553 case 34:
4554 case 18:
4555 case 98:
4556 case 82:
4557 case 162:
4558 case 146:
4559 case 226:
4560 case 210:
4561 block = "CB_CMASK";
4562 break;
4563 case 35:
4564 case 19:
4565 case 99:
4566 case 83:
4567 case 163:
4568 case 147:
4569 case 227:
4570 case 211:
4571 block = "CB_IMMED";
4572 break;
4573 case 36:
4574 case 20:
4575 case 100:
4576 case 84:
4577 case 164:
4578 case 148:
4579 case 228:
4580 case 212:
4581 block = "DB";
4582 break;
4583 case 37:
4584 case 21:
4585 case 101:
4586 case 85:
4587 case 165:
4588 case 149:
4589 case 229:
4590 case 213:
4591 block = "DB_HTILE";
4592 break;
4593 case 39:
4594 case 23:
4595 case 103:
4596 case 87:
4597 case 167:
4598 case 151:
4599 case 231:
4600 case 215:
4601 block = "DB_STEN";
4602 break;
4603 case 72:
4604 case 68:
4605 case 8:
4606 case 4:
4607 case 136:
4608 case 132:
4609 case 200:
4610 case 196:
4611 block = "TC";
4612 break;
4613 case 112:
4614 case 48:
4615 block = "CP";
4616 break;
4617 case 49:
4618 case 177:
4619 case 50:
4620 case 178:
4621 block = "SH";
4622 break;
4623 case 53:
4624 block = "VGT";
4625 break;
4626 case 117:
4627 block = "IH";
4628 break;
4629 case 51:
4630 case 115:
4631 block = "RLC";
4632 break;
4633 case 119:
4634 case 183:
4635 block = "DMA0";
4636 break;
4637 case 61:
4638 block = "DMA1";
4639 break;
4640 case 248:
4641 case 120:
4642 block = "HDP";
4643 break;
4644 default:
4645 block = "unknown";
4646 break;
4647 }
4648 }
4649
4650 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
4651 protections, vmid, addr,
4652 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
4653 block, mc_id);
4654}
4655
4656/**
4404 * si_vm_set_page - update the page tables using the CP 4657 * si_vm_set_page - update the page tables using the CP
4405 * 4658 *
4406 * @rdev: radeon_device pointer 4659 * @rdev: radeon_device pointer
@@ -5766,6 +6019,7 @@ int si_irq_process(struct radeon_device *rdev)
5766 u32 ring_index; 6019 u32 ring_index;
5767 bool queue_hotplug = false; 6020 bool queue_hotplug = false;
5768 bool queue_thermal = false; 6021 bool queue_thermal = false;
6022 u32 status, addr;
5769 6023
5770 if (!rdev->ih.enabled || rdev->shutdown) 6024 if (!rdev->ih.enabled || rdev->shutdown)
5771 return IRQ_NONE; 6025 return IRQ_NONE;
@@ -6001,11 +6255,14 @@ restart_ih:
6001 break; 6255 break;
6002 case 146: 6256 case 146:
6003 case 147: 6257 case 147:
6258 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
6259 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
6004 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 6260 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6005 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 6261 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
6006 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); 6262 addr);
6007 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 6263 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
6008 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 6264 status);
6265 si_vm_decode_fault(rdev, status, addr);
6009 /* reset addr and status */ 6266 /* reset addr and status */
6010 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); 6267 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6011 break; 6268 break;
@@ -6796,6 +7053,9 @@ static void si_program_aspm(struct radeon_device *rdev)
6796 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; 7053 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
6797 bool disable_clkreq = false; 7054 bool disable_clkreq = false;
6798 7055
7056 if (radeon_aspm == 0)
7057 return;
7058
6799 if (!(rdev->flags & RADEON_IS_PCIE)) 7059 if (!(rdev->flags & RADEON_IS_PCIE))
6800 return; 7060 return;
6801 7061
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 12a20eb77d0c..2c8da27a929f 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -367,6 +367,20 @@
367 367
368#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC 368#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
369#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC 369#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
370#define PROTECTIONS_MASK (0xf << 0)
371#define PROTECTIONS_SHIFT 0
372 /* bit 0: range
373 * bit 1: pde0
374 * bit 2: valid
375 * bit 3: read
376 * bit 4: write
377 */
378#define MEMORY_CLIENT_ID_MASK (0xff << 12)
379#define MEMORY_CLIENT_ID_SHIFT 12
380#define MEMORY_CLIENT_RW_MASK (1 << 24)
381#define MEMORY_CLIENT_RW_SHIFT 24
382#define FAULT_VMID_MASK (0xf << 25)
383#define FAULT_VMID_SHIFT 25
370 384
371#define VM_INVALIDATE_REQUEST 0x1478 385#define VM_INVALIDATE_REQUEST 0x1478
372#define VM_INVALIDATE_RESPONSE 0x147c 386#define VM_INVALIDATE_RESPONSE 0x147c
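Editor's note: these definitions give si_vm_decode_fault (added in the si.c hunk above) everything it needs to split VM_CONTEXT1_PROTECTION_FAULT_STATUS into its fields. A minimal sketch of the same mask-and-shift decode, assuming the macros above are in scope:

	static void decode_fault_status(u32 status)
	{
		u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
		u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
		u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
		bool write = (status & MEMORY_CLIENT_RW_MASK) != 0;

		pr_debug("prot 0x%x client %u vmid %u %s\n",
			 protections, mc_id, vmid, write ? "write" : "read");
	}

The driver then maps mc_id onto a block name ("CB", "DB", "TC", ...) with the per-family switch tables shown earlier.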
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 11b6b9924f1b..c0a850319908 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1732,7 +1732,13 @@ int sumo_dpm_init(struct radeon_device *rdev)
1732 pi->enable_sclk_ds = true; 1732 pi->enable_sclk_ds = true;
1733 pi->enable_dynamic_m3_arbiter = false; 1733 pi->enable_dynamic_m3_arbiter = false;
1734 pi->enable_dynamic_patch_ps = true; 1734 pi->enable_dynamic_patch_ps = true;
1735 pi->enable_gfx_power_gating = true; 1735 /* Some PALM chips don't seem to properly ungate gfx when UVD is in use;
1736 * for now just disable gfx PG.
1737 */
1738 if (rdev->family == CHIP_PALM)
1739 pi->enable_gfx_power_gating = false;
1740 else
1741 pi->enable_gfx_power_gating = true;
1736 pi->enable_gfx_clock_gating = true; 1742 pi->enable_gfx_clock_gating = true;
1737 pi->enable_mg_clock_gating = true; 1743 pi->enable_mg_clock_gating = true;
1738 pi->enable_auto_thermal_throttling = true; 1744 pi->enable_auto_thermal_throttling = true;
@@ -1845,6 +1851,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
1845 return 0; 1851 return 0;
1846 1852
1847 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 1853 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1854 if (pi->enable_boost)
1855 sumo_enable_boost(rdev, rps, false);
1848 sumo_power_level_enable(rdev, ps->num_levels - 1, true); 1856 sumo_power_level_enable(rdev, ps->num_levels - 1, true);
1849 sumo_set_forced_level(rdev, ps->num_levels - 1); 1857 sumo_set_forced_level(rdev, ps->num_levels - 1);
1850 sumo_set_forced_mode_enabled(rdev); 1858 sumo_set_forced_mode_enabled(rdev);
@@ -1855,6 +1863,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
1855 sumo_set_forced_mode_enabled(rdev); 1863 sumo_set_forced_mode_enabled(rdev);
1856 sumo_set_forced_mode(rdev, false); 1864 sumo_set_forced_mode(rdev, false);
1857 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { 1865 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1866 if (pi->enable_boost)
1867 sumo_enable_boost(rdev, rps, false);
1858 sumo_power_level_enable(rdev, 0, true); 1868 sumo_power_level_enable(rdev, 0, true);
1859 sumo_set_forced_level(rdev, 0); 1869 sumo_set_forced_level(rdev, 0);
1860 sumo_set_forced_mode_enabled(rdev); 1870 sumo_set_forced_mode_enabled(rdev);
@@ -1868,6 +1878,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
1868 for (i = 0; i < ps->num_levels; i++) { 1878 for (i = 0; i < ps->num_levels; i++) {
1869 sumo_power_level_enable(rdev, i, true); 1879 sumo_power_level_enable(rdev, i, true);
1870 } 1880 }
1881 if (pi->enable_boost)
1882 sumo_enable_boost(rdev, rps, true);
1871 } 1883 }
1872 1884
1873 rdev->pm.dpm.forced_level = level; 1885 rdev->pm.dpm.forced_level = level;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index ff82877de876..dc0fe09b2ba1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -249,8 +249,13 @@ static struct drm_driver rcar_du_driver = {
249 .gem_vm_ops = &drm_gem_cma_vm_ops, 249 .gem_vm_ops = &drm_gem_cma_vm_ops,
250 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 250 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
251 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 251 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
252 .gem_prime_import = drm_gem_cma_dmabuf_import, 252 .gem_prime_import = drm_gem_prime_import,
253 .gem_prime_export = drm_gem_cma_dmabuf_export, 253 .gem_prime_export = drm_gem_prime_export,
254 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
255 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
256 .gem_prime_vmap = drm_gem_cma_prime_vmap,
257 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
258 .gem_prime_mmap = drm_gem_cma_prime_mmap,
254 .dumb_create = rcar_du_dumb_create, 259 .dumb_create = rcar_du_dumb_create,
255 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 260 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
256 .dumb_destroy = drm_gem_cma_dumb_destroy, 261 .dumb_destroy = drm_gem_cma_dumb_destroy,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index edc10181f551..5f83f9a3ef59 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -276,8 +276,13 @@ static struct drm_driver shmob_drm_driver = {
276 .gem_vm_ops = &drm_gem_cma_vm_ops, 276 .gem_vm_ops = &drm_gem_cma_vm_ops,
277 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 277 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
278 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 278 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
279 .gem_prime_import = drm_gem_cma_dmabuf_import, 279 .gem_prime_import = drm_gem_prime_import,
280 .gem_prime_export = drm_gem_cma_dmabuf_export, 280 .gem_prime_export = drm_gem_prime_export,
281 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
282 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
283 .gem_prime_vmap = drm_gem_cma_prime_vmap,
284 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
285 .gem_prime_mmap = drm_gem_cma_prime_mmap,
281 .dumb_create = drm_gem_cma_dumb_create, 286 .dumb_create = drm_gem_cma_dumb_create,
282 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 287 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
283 .dumb_destroy = drm_gem_cma_dumb_destroy, 288 .dumb_destroy = drm_gem_cma_dumb_destroy,
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index b963ea12d175..7aec6f39fdd5 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -1891,7 +1891,7 @@ static int uvesafb_setup(char *options)
1891 } 1891 }
1892 } 1892 }
1893 1893
1894 if (mtrr != 3 && mtrr != 1) 1894 if (mtrr != 3 && mtrr != 0)
1895 pr_warn("uvesafb: mtrr should be set to 0 or 3; %d is unsupported", mtrr); 1895 pr_warn("uvesafb: mtrr should be set to 0 or 3; %d is unsupported", mtrr);
1896 1896
1897 return 0; 1897 return 0;
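Editor's note: the corrected check accepts the documented values, 0 (disable MTRR setup) and 3 (write-combining). If memory serves from the uvesafb documentation, the option is passed as part of the video= string, e.g.:

	video=uvesafb:mtrr:3,1024x768-32

The exact option syntax is recalled from Documentation/fb/uvesafb.txt, not established by this patch, and is worth verifying there.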