aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_pm.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2013-07-19 15:36:52 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-07-25 09:21:50 -0400
commit907b28c56ea40629aa6595ddfa414ec2fc7da41c (patch)
treeb426955ac1e889b5672296399712ff7b1d70662f /drivers/gpu/drm/i915/intel_pm.c
parentcb54b53adae70701bdd77d848cea4b9b39b61cf9 (diff)
drm/i915: Colocate all GT access routines in the same file
Currently, the register access code is split between i915_drv.c and intel_pm.c. It only bears a superficial resemblance to the rest of the power management code, so move it all into its own file. This is to ease further patches to enforce serialised register access. v2: Scan for random abuse of I915_WRITE_NOTRACE v3: Take the opportunity to rename the GT functions as uncore. Uncore is the term used by the hardware design (and bspec) for all functions outside of the GPU (and CPU) cores in what is also known as the System Agent. v4: Rebase onto SNB rc6 fixes Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Ben Widawsky <ben@bwidawsk.net> [danvet: Wrestle patch into applying and inline intel_uncore_early_sanitize (plus move the old comment to the new function). Also keep the _sanitize postfix for intel_uncore_sanitize.] [danvet: Squash in fixup spotted by Chris on irc: We need to call intel_pm_init before intel_uncore_sanitize since the latter will call cancel_work on the delayed rps setup work the former initializes.] Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c258
1 files changed, 8 insertions, 250 deletions
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 74d6c4d78360..0a5ba92a4b12 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,8 +32,6 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <drm/i915_powerwell.h> 33#include <drm/i915_powerwell.h>
34 34
35#define FORCEWAKE_ACK_TIMEOUT_MS 2
36
37/* FBC, or Frame Buffer Compression, is a technique employed to compress the 35/* FBC, or Frame Buffer Compression, is a technique employed to compress the
38 * framebuffer contents in-memory, aiming at reducing the required bandwidth 36 * framebuffer contents in-memory, aiming at reducing the required bandwidth
39 * during in-memory transfers and, therefore, reduce the power packet. 37 * during in-memory transfers and, therefore, reduce the power packet.
@@ -5289,254 +5287,6 @@ void intel_init_pm(struct drm_device *dev)
5289 } 5287 }
5290} 5288}
5291 5289
5292static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
5293{
5294 u32 gt_thread_status_mask;
5295
5296 if (IS_HASWELL(dev_priv->dev))
5297 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
5298 else
5299 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
5300
5301 /* w/a for a sporadic read returning 0 by waiting for the GT
5302 * thread to wake up.
5303 */
5304 if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
5305 DRM_ERROR("GT thread status wait timed out\n");
5306}
5307
5308static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
5309{
5310 I915_WRITE_NOTRACE(FORCEWAKE, 0);
5311 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5312}
5313
5314static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5315{
5316 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
5317 FORCEWAKE_ACK_TIMEOUT_MS))
5318 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5319
5320 I915_WRITE_NOTRACE(FORCEWAKE, 1);
5321 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5322
5323 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
5324 FORCEWAKE_ACK_TIMEOUT_MS))
5325 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5326
5327 /* WaRsForcewakeWaitTC0:snb */
5328 __gen6_gt_wait_for_thread_c0(dev_priv);
5329}
5330
5331static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
5332{
5333 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
5334 /* something from same cacheline, but !FORCEWAKE_MT */
5335 POSTING_READ(ECOBUS);
5336}
5337
5338static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
5339{
5340 u32 forcewake_ack;
5341
5342 if (IS_HASWELL(dev_priv->dev))
5343 forcewake_ack = FORCEWAKE_ACK_HSW;
5344 else
5345 forcewake_ack = FORCEWAKE_MT_ACK;
5346
5347 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
5348 FORCEWAKE_ACK_TIMEOUT_MS))
5349 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5350
5351 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5352 /* something from same cacheline, but !FORCEWAKE_MT */
5353 POSTING_READ(ECOBUS);
5354
5355 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
5356 FORCEWAKE_ACK_TIMEOUT_MS))
5357 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5358
5359 /* WaRsForcewakeWaitTC0:ivb,hsw */
5360 __gen6_gt_wait_for_thread_c0(dev_priv);
5361}
5362
5363/*
5364 * Generally this is called implicitly by the register read function. However,
5365 * if some sequence requires the GT to not power down then this function should
5366 * be called at the beginning of the sequence followed by a call to
5367 * gen6_gt_force_wake_put() at the end of the sequence.
5368 */
5369void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5370{
5371 unsigned long irqflags;
5372
5373 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5374 if (dev_priv->forcewake_count++ == 0)
5375 dev_priv->gt.force_wake_get(dev_priv);
5376 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5377}
5378
5379void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
5380{
5381 u32 gtfifodbg;
5382 gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
5383 if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
5384 "MMIO read or write has been dropped %x\n", gtfifodbg))
5385 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
5386}
5387
5388static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5389{
5390 I915_WRITE_NOTRACE(FORCEWAKE, 0);
5391 /* something from same cacheline, but !FORCEWAKE */
5392 POSTING_READ(ECOBUS);
5393 gen6_gt_check_fifodbg(dev_priv);
5394}
5395
5396static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
5397{
5398 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5399 /* something from same cacheline, but !FORCEWAKE_MT */
5400 POSTING_READ(ECOBUS);
5401 gen6_gt_check_fifodbg(dev_priv);
5402}
5403
5404/*
5405 * see gen6_gt_force_wake_get()
5406 */
5407void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5408{
5409 unsigned long irqflags;
5410
5411 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5412 if (--dev_priv->forcewake_count == 0)
5413 dev_priv->gt.force_wake_put(dev_priv);
5414 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5415}
5416
5417int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
5418{
5419 int ret = 0;
5420
5421 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
5422 int loop = 500;
5423 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5424 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
5425 udelay(10);
5426 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5427 }
5428 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
5429 ++ret;
5430 dev_priv->gt_fifo_count = fifo;
5431 }
5432 dev_priv->gt_fifo_count--;
5433
5434 return ret;
5435}
5436
5437static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
5438{
5439 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
5440 /* something from same cacheline, but !FORCEWAKE_VLV */
5441 POSTING_READ(FORCEWAKE_ACK_VLV);
5442}
5443
5444static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
5445{
5446 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
5447 FORCEWAKE_ACK_TIMEOUT_MS))
5448 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5449
5450 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5451 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5452 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5453
5454 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
5455 FORCEWAKE_ACK_TIMEOUT_MS))
5456 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
5457
5458 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
5459 FORCEWAKE_KERNEL),
5460 FORCEWAKE_ACK_TIMEOUT_MS))
5461 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
5462
5463 /* WaRsForcewakeWaitTC0:vlv */
5464 __gen6_gt_wait_for_thread_c0(dev_priv);
5465}
5466
5467static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
5468{
5469 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5470 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5471 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5472 /* The below doubles as a POSTING_READ */
5473 gen6_gt_check_fifodbg(dev_priv);
5474}
5475
5476void intel_gt_sanitize(struct drm_device *dev)
5477{
5478 struct drm_i915_private *dev_priv = dev->dev_private;
5479
5480 if (IS_VALLEYVIEW(dev)) {
5481 vlv_force_wake_reset(dev_priv);
5482 } else if (INTEL_INFO(dev)->gen >= 6) {
5483 __gen6_gt_force_wake_reset(dev_priv);
5484 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5485 __gen6_gt_force_wake_mt_reset(dev_priv);
5486 }
5487
5488 /* BIOS often leaves RC6 enabled, but disable it for hw init */
5489 if (INTEL_INFO(dev)->gen >= 6)
5490 intel_disable_gt_powersave(dev);
5491}
5492
5493void intel_gt_init(struct drm_device *dev)
5494{
5495 struct drm_i915_private *dev_priv = dev->dev_private;
5496
5497 if (IS_VALLEYVIEW(dev)) {
5498 dev_priv->gt.force_wake_get = vlv_force_wake_get;
5499 dev_priv->gt.force_wake_put = vlv_force_wake_put;
5500 } else if (IS_HASWELL(dev)) {
5501 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
5502 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
5503 } else if (IS_IVYBRIDGE(dev)) {
5504 u32 ecobus;
5505
5506 /* IVB configs may use multi-threaded forcewake */
5507
5508 /* A small trick here - if the bios hasn't configured
5509 * MT forcewake, and if the device is in RC6, then
5510 * force_wake_mt_get will not wake the device and the
5511 * ECOBUS read will return zero. Which will be
5512 * (correctly) interpreted by the test below as MT
5513 * forcewake being disabled.
5514 */
5515 mutex_lock(&dev->struct_mutex);
5516 __gen6_gt_force_wake_mt_get(dev_priv);
5517 ecobus = I915_READ_NOTRACE(ECOBUS);
5518 __gen6_gt_force_wake_mt_put(dev_priv);
5519 mutex_unlock(&dev->struct_mutex);
5520
5521 if (ecobus & FORCEWAKE_MT_ENABLE) {
5522 dev_priv->gt.force_wake_get =
5523 __gen6_gt_force_wake_mt_get;
5524 dev_priv->gt.force_wake_put =
5525 __gen6_gt_force_wake_mt_put;
5526 } else {
5527 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
5528 DRM_INFO("when using vblank-synced partial screen updates.\n");
5529 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5530 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5531 }
5532 } else if (IS_GEN6(dev)) {
5533 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5534 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5535 }
5536 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5537 intel_gen6_powersave_work);
5538}
5539
5540int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) 5290int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
5541{ 5291{
5542 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5292 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5639,3 +5389,11 @@ int vlv_freq_opcode(int ddr_freq, int val)
5639 return val; 5389 return val;
5640} 5390}
5641 5391
5392void intel_pm_init(struct drm_device *dev)
5393{
5394 struct drm_i915_private *dev_priv = dev->dev_private;
5395
5396 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5397 intel_gen6_powersave_work);
5398}
5399