author     Dave Airlie <airlied@redhat.com>    2015-02-11 18:16:23 -0500
committer  Dave Airlie <airlied@redhat.com>    2015-02-11 18:16:23 -0500
commit     64aa7e342a0a05cc19e278bc0822e2e667ebb645 (patch)
tree       82b8e948b559c70018927c1b8da6b9a54761ced3 /drivers/gpu/drm
parent     85840c76d8ad18d978da44e8d2f27bb35b7159af (diff)
parent     46efa4abe5712276494adbce102f46e3214632fd (diff)
Merge tag 'drm-intel-next-fixes-2015-02-11' of git://anongit.freedesktop.org/drm-intel into drm-next
Here's a batch of i915 fixes for drm-next, with more cc: stable material
than fixes specific to drm-next.

* tag 'drm-intel-next-fixes-2015-02-11' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Clamp efficient frequency to valid range
  drm/i915: Really ignore long HPD pulses on eDP
  drm/i915: Correct the base value while updating LP_OUTPUT_HOLD in MIPI_PORT_CTRL
  drm/i915: Insert a command barrier on BLT/BSD cache flushes
  drm/i915: Drop vblank wait from intel_dp_link_down
  drm/i915: Squelch overzealous uncore reset WARN_ON
  drm/i915: Take runtime pm reference on hangcheck_info
  drm/i915: Correct the IOSF Dev_FN field for IOSF transfers
  drm/i915: Prevent use-after-free in invalidate_range_start callback
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c      | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c          | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c         |  5
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c         | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c          |  5
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c    | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c      |  9
9 files changed, 94 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 211d4949a675..96e811fe24ca 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1223,8 +1223,11 @@ out:
 static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
-       struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
+       u64 acthd[I915_NUM_RINGS];
+       u32 seqno[I915_NUM_RINGS];
        int i;
 
        if (!i915.enable_hangcheck) {
@@ -1232,6 +1235,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                return 0;
        }
 
+       intel_runtime_pm_get(dev_priv);
+
+       for_each_ring(ring, dev_priv, i) {
+               seqno[i] = ring->get_seqno(ring, false);
+               acthd[i] = intel_ring_get_active_head(ring);
+       }
+
+       intel_runtime_pm_put(dev_priv);
+
        if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
                seq_printf(m, "Hangcheck active, fires in %dms\n",
                           jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
@@ -1242,14 +1254,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        for_each_ring(ring, dev_priv, i) {
                seq_printf(m, "%s:\n", ring->name);
                seq_printf(m, "\tseqno = %x [current %x]\n",
-                          ring->hangcheck.seqno, ring->get_seqno(ring, false));
-               seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
-               seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+                          ring->hangcheck.seqno, seqno[i]);
                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)ring->hangcheck.acthd,
-                          (long long)intel_ring_get_active_head(ring));
+                          (long long)acthd[i]);
                seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
                           (long long)ring->hangcheck.max_acthd);
+               seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+               seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
        }
 
        return 0;
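
Note: the shape of the hangcheck_info fix above is take a runtime-pm reference, snapshot every volatile hardware value into locals, drop the reference, then format the output with no further hardware access. A minimal userspace sketch of that sample-then-report pattern follows; all names (runtime_pm_get, hw_get_seqno, etc.) are hypothetical stand-ins, not the driver's API:

    #include <stdio.h>

    #define NUM_RINGS 3

    /* Stand-ins for runtime-pm and the hardware accessors. */
    static void runtime_pm_get(void) { /* wake the device */ }
    static void runtime_pm_put(void) { /* let it sleep again */ }
    static unsigned hw_get_seqno(int ring) { return 0x100u + ring; }
    static unsigned long long hw_get_acthd(int ring) { return 0x2000ULL + ring; }

    int main(void)
    {
            unsigned seqno[NUM_RINGS];
            unsigned long long acthd[NUM_RINGS];
            int i;

            runtime_pm_get();
            for (i = 0; i < NUM_RINGS; i++) {       /* sample while powered */
                    seqno[i] = hw_get_seqno(i);
                    acthd[i] = hw_get_acthd(i);
            }
            runtime_pm_put();                       /* no hardware access below */

            for (i = 0; i < NUM_RINGS; i++)
                    printf("ring%d: seqno = %x, ACTHD = 0x%08llx\n",
                           i, seqno[i], acthd[i]);
            return 0;
    }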
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d182058383a9..1719078c763a 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -113,7 +113,10 @@ restart:
                        continue;
 
                obj = mo->obj;
-               drm_gem_object_reference(&obj->base);
+
+               if (!kref_get_unless_zero(&obj->base.refcount))
+                       continue;
+
                spin_unlock(&mn->lock);
 
                cancel_userptr(obj);
@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                it = interval_tree_iter_first(&mn->objects, start, end);
                if (it != NULL) {
                        obj = container_of(it, struct i915_mmu_object, it)->obj;
-                       drm_gem_object_reference(&obj->base);
+
+                       /* The mmu_object is released late when destroying the
+                        * GEM object so it is entirely possible to gain a
+                        * reference on an object in the process of being freed
+                        * since our serialisation is via the spinlock and not
+                        * the struct_mutex - and consequently use it after it
+                        * is freed and then double free it.
+                        */
+                       if (!kref_get_unless_zero(&obj->base.refcount)) {
+                               spin_unlock(&mn->lock);
+                               serial = 0;
+                               continue;
+                       }
+
                        serial = mn->serial;
                }
                spin_unlock(&mn->lock);
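
Note: both userptr hunks replace an unconditional drm_gem_object_reference() with kref_get_unless_zero(). The lookup is serialised only by the spinlock, not struct_mutex, so it can race with the final unreference; blindly incrementing a refcount that has already hit zero resurrects a dying object and leads to use-after-free and double free. A userspace model of the only-if-still-alive acquire, with C11 atomics standing in for struct kref (hypothetical types):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct object { atomic_int refcount; };

    /* Take a reference only while the count is non-zero; returns false
     * once the final put has begun, so a racing lookup cannot revive
     * a dying object. */
    static bool get_unless_zero(struct object *obj)
    {
            int v = atomic_load(&obj->refcount);

            while (v != 0)
                    if (atomic_compare_exchange_weak(&obj->refcount, &v, v + 1))
                            return true;
            return false;
    }

    int main(void)
    {
            struct object live = { 1 }, dying = { 0 };

            printf("live:  %d\n", get_unless_zero(&live));  /* 1: safe to use */
            printf("dying: %d\n", get_unless_zero(&dying)); /* 0: skip it */
            return 0;
    }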
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index eea9e366a109..a74aaf9242b9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3521,8 +3521,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(intel_dig_port->base.base.crtc);
        uint32_t DP = intel_dp->DP;
 
        if (WARN_ON(HAS_DDI(dev)))
@@ -3547,8 +3545,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
        if (HAS_PCH_IBX(dev) &&
            I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
-               struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
-
                /* Hardware workaround: leaving our transcoder select
                 * set to transcoder B while it's off will prevent the
                 * corresponding HDMI output on transcoder A.
@@ -3559,18 +3555,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                 */
                DP &= ~DP_PIPEB_SELECT;
                I915_WRITE(intel_dp->output_reg, DP);
-
-               /* Changes to enable or select take place the vblank
-                * after being written.
-                */
-               if (WARN_ON(crtc == NULL)) {
-                       /* We should never try to disable a port without a crtc
-                        * attached. For paranoia keep the code around for a
-                        * bit. */
-                       POSTING_READ(intel_dp->output_reg);
-                       msleep(50);
-               } else
-                       intel_wait_for_vblank(dev, intel_crtc->pipe);
+               POSTING_READ(intel_dp->output_reg);
        }
 
        DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -4446,7 +4431,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
         */
        DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                      port_name(intel_dig_port->port));
-       return false;
+       return IRQ_HANDLED;
    }
 
        DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
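
Note: the link-down hunk above swaps the vblank wait for a bare POSTING_READ(). MMIO writes are posted (buffered) by the bus; reading the same register back cannot complete until earlier writes have landed, which is all the ordering this path needs. A simplified, compilable model of that idiom, with volatile pointers standing in for the driver's I915_WRITE/POSTING_READ accessors and the bit position treated as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define DP_PIPEB_SELECT (1u << 30)      /* illustrative bit position */

    static void mmio_write32(volatile uint32_t *reg, uint32_t val)
    {
            *reg = val;             /* the write may sit in a posting buffer */
    }

    static uint32_t mmio_read32(volatile uint32_t *reg)
    {
            return *reg;            /* a read-back flushes prior posted writes */
    }

    static void link_down(volatile uint32_t *output_reg, uint32_t dp)
    {
            dp &= ~DP_PIPEB_SELECT;
            mmio_write32(output_reg, dp);
            (void)mmio_read32(output_reg);  /* POSTING_READ() equivalent */
    }

    int main(void)
    {
            uint32_t fake_reg = DP_PIPEB_SELECT | 0x5;

            link_down(&fake_reg, fake_reg);
            printf("reg after link_down: 0x%08x\n", fake_reg);  /* 0x00000005 */
            return 0;
    }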
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index ef3df5e3d819..10ab68457ca8 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -360,12 +360,11 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
                I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
                usleep_range(2500, 3000);
 
-               val = I915_READ(MIPI_PORT_CTRL(port));
-
                /* Enable MIPI PHY transparent latch
                 * Common bit for both MIPI Port A & MIPI Port C
                 * No similar bit in MIPI Port C reg
                 */
+               val = I915_READ(MIPI_PORT_CTRL(PORT_A));
                I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
                usleep_range(1000, 1500);
 
@@ -543,10 +542,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
                                                == 0x00000), 30))
                        DRM_ERROR("DSI LP not going Low\n");
 
-               val = I915_READ(MIPI_PORT_CTRL(port));
                /* Disable MIPI PHY transparent latch
                 * Common bit for both MIPI Port A & MIPI Port C
                 */
+               val = I915_READ(MIPI_PORT_CTRL(PORT_A));
                I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD);
                usleep_range(1000, 1500);
 
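
Note: the DSI fix corrects a read-modify-write whose read and write targeted different registers. LP_OUTPUT_HOLD exists only in the Port A register, yet the base value was read from MIPI_PORT_CTRL(port), so on Port C the Port A register was clobbered with an unrelated base. A toy model of the bug and the fix, with a hypothetical two-element register array standing in for the MMIO registers:

    #include <stdint.h>
    #include <stdio.h>

    #define LP_OUTPUT_HOLD (1u << 16)       /* bit modeled on the driver */

    static uint32_t port_ctrl[2] = { 0x1, 0x2 };  /* [0] = port A, [1] = port C */

    /* RMW of the port A register. Passing the wrong index for the read
     * reproduces the bug: port A gets port C's unrelated base value. */
    static void set_lp_output_hold(int read_idx)
    {
            uint32_t val = port_ctrl[read_idx];
            port_ctrl[0] = val | LP_OUTPUT_HOLD;
    }

    int main(void)
    {
            set_lp_output_hold(1);  /* old code: base from MIPI_PORT_CTRL(port) */
            printf("buggy: port A = 0x%x\n", port_ctrl[0]);   /* 0x10002 */

            port_ctrl[0] = 0x1;
            set_lp_output_hold(0);  /* fix: base from MIPI_PORT_CTRL(PORT_A) */
            printf("fixed: port A = 0x%x\n", port_ctrl[0]);   /* 0x10001 */
            return 0;
    }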
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index a94346fee160..0f358c5999ec 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1211,15 +1211,17 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 
        cmd = MI_FLUSH_DW + 1;
 
-       if (ring == &dev_priv->ring[VCS]) {
-               if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-                       cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
-                               MI_FLUSH_DW_STORE_INDEX |
-                               MI_FLUSH_DW_OP_STOREDW;
-       } else {
-               if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
-                       cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
-                               MI_FLUSH_DW_OP_STOREDW;
+       /* We always require a command barrier so that subsequent
+        * commands, such as breadcrumb interrupts, are strictly ordered
+        * wrt the contents of the write cache being flushed to memory
+        * (and thus being coherent from the CPU).
+        */
+       cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+       if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+               cmd |= MI_INVALIDATE_TLB;
+               if (ring == &dev_priv->ring[VCS])
+                       cmd |= MI_INVALIDATE_BSD;
        }
 
        intel_logical_ring_emit(ringbuf, cmd);
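
Note: the restructuring above makes the post-sync store (the command barrier) unconditional, while TLB invalidation stays conditional on the invalidate domains and the ring; the same change is applied to the legacy BSD and blitter flush paths in intel_ringbuffer.c below. A compilable sketch of the flag-building, with MI_FLUSH_DW bit values copied from the driver's register definitions of this era (treat the exact values as illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define MI_FLUSH_DW             ((0x26u << 23) | 1)
    #define MI_FLUSH_DW_STORE_INDEX (1u << 21)
    #define MI_INVALIDATE_TLB       (1u << 18)
    #define MI_FLUSH_DW_OP_STOREDW  (1u << 14)
    #define MI_INVALIDATE_BSD       (1u << 7)

    enum ring_id { RCS, VCS, BCS };

    static uint32_t gen8_flush_cmd(enum ring_id ring, int invalidate)
    {
            uint32_t cmd = MI_FLUSH_DW + 1; /* extra dword for 64-bit address */

            /* Unconditional command barrier: the post-sync store orders
             * later commands (e.g. breadcrumb interrupts) after the flush. */
            cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

            if (invalidate) {
                    cmd |= MI_INVALIDATE_TLB;
                    if (ring == VCS)    /* BSD ring also drops its own caches */
                            cmd |= MI_INVALIDATE_BSD;
            }
            return cmd;
    }

    int main(void)
    {
            printf("VCS flush+invalidate: 0x%08x\n", gen8_flush_cmd(VCS, 1));
            printf("BCS flush only:       0x%08x\n", gen8_flush_cmd(BCS, 0));
            return 0;
    }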
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6ece663f3394..24d77ddcc5f4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4005,7 +4005,10 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
                                    &ddcc_status);
                if (0 == ret)
                        dev_priv->rps.efficient_freq =
-                               (ddcc_status >> 8) & 0xff;
+                               clamp_t(u8,
+                                       ((ddcc_status >> 8) & 0xff),
+                                       dev_priv->rps.min_freq,
+                                       dev_priv->rps.max_freq);
        }
 
        /* Preserve min/max settings in case of re-init */
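
Note: clamp_t(u8, val, lo, hi) casts all three operands to u8 and bounds val to [lo, hi], so an efficient frequency reported by the pcode outside the supported range is pulled back inside it. A minimal userspace equivalent of the kernel macro (GCC statement-expression, made-up RPS bounds):

    #include <stdio.h>

    #define clamp_t(type, val, lo, hi) ({           \
            type __v = (type)(val);                 \
            type __l = (type)(lo);                  \
            type __h = (type)(hi);                  \
            __v < __l ? __l : (__v > __h ? __h : __v); })

    int main(void)
    {
            unsigned char min_freq = 30, max_freq = 110;  /* made-up bounds */

            printf("%d\n", clamp_t(unsigned char, 7,   min_freq, max_freq)); /* 30 */
            printf("%d\n", clamp_t(unsigned char, 64,  min_freq, max_freq)); /* 64 */
            printf("%d\n", clamp_t(unsigned char, 200, min_freq, max_freq)); /* 110 */
            return 0;
    }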
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0bd3976d88e1..e5b3c6dbd467 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2240,6 +2240,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
        cmd = MI_FLUSH_DW;
        if (INTEL_INFO(ring->dev)->gen >= 8)
                cmd += 1;
+
+       /* We always require a command barrier so that subsequent
+        * commands, such as breadcrumb interrupts, are strictly ordered
+        * wrt the contents of the write cache being flushed to memory
+        * (and thus being coherent from the CPU).
+        */
+       cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
        /*
         * Bspec vol 1c.5 - video engine command streamer:
         * "If ENABLED, all TLBs will be invalidated once the flush
@@ -2247,8 +2255,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
         * Post-Sync Operation field is a value of 1h or 3h."
         */
        if (invalidate & I915_GEM_GPU_DOMAINS)
-               cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
-                       MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+               cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
        if (INTEL_INFO(ring->dev)->gen >= 8) {
@@ -2344,6 +2352,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
        cmd = MI_FLUSH_DW;
        if (INTEL_INFO(ring->dev)->gen >= 8)
                cmd += 1;
+
+       /* We always require a command barrier so that subsequent
+        * commands, such as breadcrumb interrupts, are strictly ordered
+        * wrt the contents of the write cache being flushed to memory
+        * (and thus being coherent from the CPU).
+        */
+       cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
        /*
         * Bspec vol 1c.3 - blitter engine command streamer:
         * "If ENABLED, all TLBs will be invalidated once the flush
@@ -2351,8 +2367,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
         * Post-Sync Operation field is a value of 1h or 3h."
         */
        if (invalidate & I915_GEM_DOMAIN_RENDER)
-               cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
-                       MI_FLUSH_DW_OP_STOREDW;
+               cmd |= MI_INVALIDATE_TLB;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
        if (INTEL_INFO(ring->dev)->gen >= 8) {
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 3c42eeffa3cb..693ce8281970 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -82,7 +82,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
                        SB_CRRDDA_NP, addr, &val);
        mutex_unlock(&dev_priv->dpio_lock);
 
@@ -94,7 +94,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
                        SB_CRWRDA_NP, addr, &val);
        mutex_unlock(&dev_priv->dpio_lock);
 }
@@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
 {
        u32 val = 0;
 
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
                        SB_CRRDDA_NP, reg, &val);
 
        return val;
@@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
 
 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
                        SB_CRWRDA_NP, reg, &val);
 }
 
@@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
                        SB_CRRDDA_NP, addr, &val);
        mutex_unlock(&dev_priv->dpio_lock);
 
@@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
 u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
 {
        u32 val = 0;
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
                        SB_CRRDDA_NP, reg, &val);
        return val;
 }
 
 void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
                        SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
 {
        u32 val = 0;
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
                        SB_CRRDDA_NP, reg, &val);
        return val;
 }
 
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
                        SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
 {
        u32 val = 0;
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
                        SB_CRRDDA_NP, reg, &val);
        return val;
 }
 
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
                        SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
 {
        u32 val = 0;
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
                        SB_CRRDDA_NP, reg, &val);
        return val;
 }
 
 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
                        SB_CRWRDA_NP, reg, &val);
 }
 
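
Note: every sideband hunk is the same one-character fix: the Dev_FN field carried in the IOSF message must name device 0, function 0, not device 2. PCI_DEVFN (from <linux/pci.h>) packs the slot and function numbers into the single devfn byte, slot in the high five bits and function in the low three. A small demonstration:

    #include <stdio.h>

    #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_SLOT(devfn)       (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)       ((devfn) & 0x07)

    int main(void)
    {
            printf("PCI_DEVFN(2, 0) = 0x%02x\n", PCI_DEVFN(2, 0)); /* 0x10: wrong */
            printf("PCI_DEVFN(0, 0) = 0x%02x\n", PCI_DEVFN(0, 0)); /* 0x00: right */
            printf("0x10 decodes to slot %d, func %d\n",
                   PCI_SLOT(PCI_DEVFN(2, 0)), PCI_FUNC(PCI_DEVFN(2, 0)));
            return 0;
    }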
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 76b60a3538b2..c47a3baa53d5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -166,7 +166,8 @@ fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_do
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;
 
-       WARN_ON(dev_priv->uncore.fw_domains == 0);
+       if (dev_priv->uncore.fw_domains == 0)
+               return;
 
        for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
                fw_domain_reset(d);
@@ -997,6 +998,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+               return;
+
        if (IS_GEN9(dev)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1069,6 +1073,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE, FORCEWAKE_ACK);
        }
+
+       /* All future platforms are expected to require complex power gating */
+       WARN_ON(dev_priv->uncore.fw_domains == 0);
 }
 
 void intel_uncore_init(struct drm_device *dev)
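
Note: the uncore hunks move the sanity check rather than drop it. fw_domains_reset() can legitimately see an empty domain mask (pre-gen6 hardware has no forcewake), so it now returns quietly; the one-time WARN_ON moves to the end of domain init, where an empty mask really is a bug. A compilable sketch of that guard-versus-warning placement, using hypothetical simplified stand-ins for the driver structures:

    #include <assert.h>
    #include <stdio.h>

    struct uncore { unsigned fw_domains; int gen; };

    /* Helpers that may see an empty mask return quietly. */
    static void fw_domains_reset(struct uncore *u, unsigned mask)
    {
            if (u->fw_domains == 0)
                    return;
            printf("resetting domains 0x%x\n", mask & u->fw_domains);
    }

    /* The sanity check lives where an empty mask is a genuine bug. */
    static void fw_domains_init(struct uncore *u)
    {
            if (u->gen <= 5)
                    return;                 /* no forcewake before gen6 */
            u->fw_domains = 0x1;            /* e.g. the render domain */
            assert(u->fw_domains != 0);     /* future platforms must set one */
    }

    int main(void)
    {
            struct uncore gen4 = { 0, 4 }, gen9 = { 0, 9 };

            fw_domains_init(&gen4);
            fw_domains_reset(&gen4, ~0u);   /* silent no-op, no warning spam */
            fw_domains_init(&gen9);
            fw_domains_reset(&gen9, ~0u);   /* resets domain 0x1 */
            return 0;
    }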