aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2013-07-19 15:36:52 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-07-25 09:21:50 -0400
commit907b28c56ea40629aa6595ddfa414ec2fc7da41c (patch)
treeb426955ac1e889b5672296399712ff7b1d70662f
parentcb54b53adae70701bdd77d848cea4b9b39b61cf9 (diff)
drm/i915: Colocate all GT access routines in the same file
Currently, the register access code is split between i915_drv.c and intel_pm.c. It only bears a superficial resemblance to the rest of the power management code, so move it all into its own file. This is to ease further patches to enforce serialised register access. v2: Scan for random abuse of I915_WRITE_NOTRACE v3: Take the opportunity to rename the GT functions as uncore. Uncore is the term used by the hardware design (and bspec) for all functions outside of the GPU (and CPU) cores in what is also known as the System Agent. v4: Rebase onto SNB rc6 fixes Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Ben Widawsky <ben@bwidawsk.net> [danvet: Wrestle patch into applying and inline intel_uncore_early_sanitize (plus move the old comment to the new function). Also keep the _sanitize postfix for intel_uncore_sanitize.] [danvet: Squash in fixup spotted by Chris on irc: We need to call intel_pm_init before intel_uncore_sanitize since the latter will call cancel_work on the delayed rps setup work the former initializes.] Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c25
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c271
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h31
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c3
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c258
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c571
10 files changed, 614 insertions, 565 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9d1da7cceb21..b8449a84a0dc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -38,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
38 intel_sprite.o \ 38 intel_sprite.o \
39 intel_opregion.o \ 39 intel_opregion.o \
40 intel_sideband.o \ 40 intel_sideband.o \
41 intel_uncore.o \
41 dvo_ch7xxx.o \ 42 dvo_ch7xxx.o \
42 dvo_ch7017.o \ 43 dvo_ch7017.o \
43 dvo_ivch.o \ 44 dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9d871c7eeaee..0e904986f3e9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -989,9 +989,9 @@ static int gen6_drpc_info(struct seq_file *m)
989 if (ret) 989 if (ret)
990 return ret; 990 return ret;
991 991
992 spin_lock_irq(&dev_priv->gt_lock); 992 spin_lock_irq(&dev_priv->uncore.lock);
993 forcewake_count = dev_priv->forcewake_count; 993 forcewake_count = dev_priv->uncore.forcewake_count;
994 spin_unlock_irq(&dev_priv->gt_lock); 994 spin_unlock_irq(&dev_priv->uncore.lock);
995 995
996 if (forcewake_count) { 996 if (forcewake_count) {
997 seq_puts(m, "RC information inaccurate because somebody " 997 seq_puts(m, "RC information inaccurate because somebody "
@@ -1375,9 +1375,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1375 struct drm_i915_private *dev_priv = dev->dev_private; 1375 struct drm_i915_private *dev_priv = dev->dev_private;
1376 unsigned forcewake_count; 1376 unsigned forcewake_count;
1377 1377
1378 spin_lock_irq(&dev_priv->gt_lock); 1378 spin_lock_irq(&dev_priv->uncore.lock);
1379 forcewake_count = dev_priv->forcewake_count; 1379 forcewake_count = dev_priv->uncore.forcewake_count;
1380 spin_unlock_irq(&dev_priv->gt_lock); 1380 spin_unlock_irq(&dev_priv->uncore.lock);
1381 1381
1382 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1382 seq_printf(m, "forcewake count = %u\n", forcewake_count);
1383 1383
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1c5b39738508..8536acd0a85d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1436,22 +1436,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1436} 1436}
1437 1437
1438/** 1438/**
1439 * intel_early_sanitize_regs - clean up BIOS state
1440 * @dev: DRM device
1441 *
1442 * This function must be called before we do any I915_READ or I915_WRITE. Its
1443 * purpose is to clean up any state left by the BIOS that may affect us when
1444 * reading and/or writing registers.
1445 */
1446static void intel_early_sanitize_regs(struct drm_device *dev)
1447{
1448 struct drm_i915_private *dev_priv = dev->dev_private;
1449
1450 if (HAS_FPGA_DBG_UNCLAIMED(dev))
1451 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1452}
1453
1454/**
1455 * i915_driver_load - setup chip and create an initial config 1439 * i915_driver_load - setup chip and create an initial config
1456 * @dev: DRM device 1440 * @dev: DRM device
1457 * @flags: startup flags 1441 * @flags: startup flags
@@ -1493,7 +1477,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1493 spin_lock_init(&dev_priv->irq_lock); 1477 spin_lock_init(&dev_priv->irq_lock);
1494 spin_lock_init(&dev_priv->gpu_error.lock); 1478 spin_lock_init(&dev_priv->gpu_error.lock);
1495 spin_lock_init(&dev_priv->backlight.lock); 1479 spin_lock_init(&dev_priv->backlight.lock);
1496 spin_lock_init(&dev_priv->gt_lock); 1480 spin_lock_init(&dev_priv->uncore.lock);
1497 mutex_init(&dev_priv->dpio_lock); 1481 mutex_init(&dev_priv->dpio_lock);
1498 mutex_init(&dev_priv->rps.hw_lock); 1482 mutex_init(&dev_priv->rps.hw_lock);
1499 mutex_init(&dev_priv->modeset_restore_lock); 1483 mutex_init(&dev_priv->modeset_restore_lock);
@@ -1529,7 +1513,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1529 goto put_bridge; 1513 goto put_bridge;
1530 } 1514 }
1531 1515
1532 intel_early_sanitize_regs(dev); 1516 intel_uncore_early_sanitize(dev);
1533 1517
1534 if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { 1518 if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
1535 /* The docs do not explain exactly how the calculation can be 1519 /* The docs do not explain exactly how the calculation can be
@@ -1602,8 +1586,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1602 intel_detect_pch(dev); 1586 intel_detect_pch(dev);
1603 1587
1604 intel_irq_init(dev); 1588 intel_irq_init(dev);
1605 intel_gt_sanitize(dev); 1589 intel_pm_init(dev);
1606 intel_gt_init(dev); 1590 intel_uncore_sanitize(dev);
1591 intel_uncore_init(dev);
1607 1592
1608 /* Try to make sure MCHBAR is enabled before poking at it */ 1593 /* Try to make sure MCHBAR is enabled before poking at it */
1609 intel_setup_mchbar(dev); 1594 intel_setup_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5849b0a91b4e..01d63a0435fb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -723,7 +723,7 @@ static int i915_drm_thaw(struct drm_device *dev)
723{ 723{
724 int error = 0; 724 int error = 0;
725 725
726 intel_gt_sanitize(dev); 726 intel_uncore_sanitize(dev);
727 727
728 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 728 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
729 mutex_lock(&dev->struct_mutex); 729 mutex_lock(&dev->struct_mutex);
@@ -749,7 +749,7 @@ int i915_resume(struct drm_device *dev)
749 749
750 pci_set_master(dev->pdev); 750 pci_set_master(dev->pdev);
751 751
752 intel_gt_sanitize(dev); 752 intel_uncore_sanitize(dev);
753 753
754 /* 754 /*
755 * Platforms with opregion should have sane BIOS, older ones (gen3 and 755 * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -770,140 +770,6 @@ int i915_resume(struct drm_device *dev)
770 return 0; 770 return 0;
771} 771}
772 772
773static int i8xx_do_reset(struct drm_device *dev)
774{
775 struct drm_i915_private *dev_priv = dev->dev_private;
776
777 if (IS_I85X(dev))
778 return -ENODEV;
779
780 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
781 POSTING_READ(D_STATE);
782
783 if (IS_I830(dev) || IS_845G(dev)) {
784 I915_WRITE(DEBUG_RESET_I830,
785 DEBUG_RESET_DISPLAY |
786 DEBUG_RESET_RENDER |
787 DEBUG_RESET_FULL);
788 POSTING_READ(DEBUG_RESET_I830);
789 msleep(1);
790
791 I915_WRITE(DEBUG_RESET_I830, 0);
792 POSTING_READ(DEBUG_RESET_I830);
793 }
794
795 msleep(1);
796
797 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
798 POSTING_READ(D_STATE);
799
800 return 0;
801}
802
803static int i965_reset_complete(struct drm_device *dev)
804{
805 u8 gdrst;
806 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
807 return (gdrst & GRDOM_RESET_ENABLE) == 0;
808}
809
810static int i965_do_reset(struct drm_device *dev)
811{
812 int ret;
813
814 /*
815 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
816 * well as the reset bit (GR/bit 0). Setting the GR bit
817 * triggers the reset; when done, the hardware will clear it.
818 */
819 pci_write_config_byte(dev->pdev, I965_GDRST,
820 GRDOM_RENDER | GRDOM_RESET_ENABLE);
821 ret = wait_for(i965_reset_complete(dev), 500);
822 if (ret)
823 return ret;
824
825 /* We can't reset render&media without also resetting display ... */
826 pci_write_config_byte(dev->pdev, I965_GDRST,
827 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
828
829 ret = wait_for(i965_reset_complete(dev), 500);
830 if (ret)
831 return ret;
832
833 pci_write_config_byte(dev->pdev, I965_GDRST, 0);
834
835 return 0;
836}
837
838static int ironlake_do_reset(struct drm_device *dev)
839{
840 struct drm_i915_private *dev_priv = dev->dev_private;
841 u32 gdrst;
842 int ret;
843
844 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
845 gdrst &= ~GRDOM_MASK;
846 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
847 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
848 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
849 if (ret)
850 return ret;
851
852 /* We can't reset render&media without also resetting display ... */
853 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
854 gdrst &= ~GRDOM_MASK;
855 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
856 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
857 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
858}
859
860static int gen6_do_reset(struct drm_device *dev)
861{
862 struct drm_i915_private *dev_priv = dev->dev_private;
863 int ret;
864 unsigned long irqflags;
865
866 /* Hold gt_lock across reset to prevent any register access
867 * with forcewake not set correctly
868 */
869 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
870
871 /* Reset the chip */
872
873 /* GEN6_GDRST is not in the gt power well, no need to check
874 * for fifo space for the write or forcewake the chip for
875 * the read
876 */
877 I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
878
879 /* Spin waiting for the device to ack the reset request */
880 ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
881
882 /* If reset with a user forcewake, try to restore, otherwise turn it off */
883 if (dev_priv->forcewake_count)
884 dev_priv->gt.force_wake_get(dev_priv);
885 else
886 dev_priv->gt.force_wake_put(dev_priv);
887
888 /* Restore fifo count */
889 dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
890
891 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
892 return ret;
893}
894
895int intel_gpu_reset(struct drm_device *dev)
896{
897 switch (INTEL_INFO(dev)->gen) {
898 case 7:
899 case 6: return gen6_do_reset(dev);
900 case 5: return ironlake_do_reset(dev);
901 case 4: return i965_do_reset(dev);
902 case 2: return i8xx_do_reset(dev);
903 default: return -ENODEV;
904 }
905}
906
907/** 773/**
908 * i915_reset - reset chip after a hang 774 * i915_reset - reset chip after a hang
909 * @dev: drm device to reset 775 * @dev: drm device to reset
@@ -1233,136 +1099,3 @@ module_exit(i915_exit);
1233MODULE_AUTHOR(DRIVER_AUTHOR); 1099MODULE_AUTHOR(DRIVER_AUTHOR);
1234MODULE_DESCRIPTION(DRIVER_DESC); 1100MODULE_DESCRIPTION(DRIVER_DESC);
1235MODULE_LICENSE("GPL and additional rights"); 1101MODULE_LICENSE("GPL and additional rights");
1236
1237/* We give fast paths for the really cool registers */
1238#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1239 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
1240 ((reg) < 0x40000) && \
1241 ((reg) != FORCEWAKE))
1242static void
1243ilk_dummy_write(struct drm_i915_private *dev_priv)
1244{
1245 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1246 * the chip from rc6 before touching it for real. MI_MODE is masked,
1247 * hence harmless to write 0 into. */
1248 I915_WRITE_NOTRACE(MI_MODE, 0);
1249}
1250
1251static void
1252hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
1253{
1254 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
1255 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1256 DRM_ERROR("Unknown unclaimed register before writing to %x\n",
1257 reg);
1258 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1259 }
1260}
1261
1262static void
1263hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
1264{
1265 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
1266 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1267 DRM_ERROR("Unclaimed write to %x\n", reg);
1268 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1269 }
1270}
1271
1272#define __i915_read(x, y) \
1273u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1274 unsigned long irqflags; \
1275 u##x val = 0; \
1276 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1277 if (IS_GEN5(dev_priv->dev)) \
1278 ilk_dummy_write(dev_priv); \
1279 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1280 if (dev_priv->forcewake_count == 0) \
1281 dev_priv->gt.force_wake_get(dev_priv); \
1282 val = read##y(dev_priv->regs + reg); \
1283 if (dev_priv->forcewake_count == 0) \
1284 dev_priv->gt.force_wake_put(dev_priv); \
1285 } else { \
1286 val = read##y(dev_priv->regs + reg); \
1287 } \
1288 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1289 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
1290 return val; \
1291}
1292
1293__i915_read(8, b)
1294__i915_read(16, w)
1295__i915_read(32, l)
1296__i915_read(64, q)
1297#undef __i915_read
1298
1299#define __i915_write(x, y) \
1300void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1301 unsigned long irqflags; \
1302 u32 __fifo_ret = 0; \
1303 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
1304 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
1305 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1306 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1307 } \
1308 if (IS_GEN5(dev_priv->dev)) \
1309 ilk_dummy_write(dev_priv); \
1310 hsw_unclaimed_reg_clear(dev_priv, reg); \
1311 write##y(val, dev_priv->regs + reg); \
1312 if (unlikely(__fifo_ret)) { \
1313 gen6_gt_check_fifodbg(dev_priv); \
1314 } \
1315 hsw_unclaimed_reg_check(dev_priv, reg); \
1316 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1317}
1318__i915_write(8, b)
1319__i915_write(16, w)
1320__i915_write(32, l)
1321__i915_write(64, q)
1322#undef __i915_write
1323
1324static const struct register_whitelist {
1325 uint64_t offset;
1326 uint32_t size;
1327 uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1328} whitelist[] = {
1329 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
1330};
1331
1332int i915_reg_read_ioctl(struct drm_device *dev,
1333 void *data, struct drm_file *file)
1334{
1335 struct drm_i915_private *dev_priv = dev->dev_private;
1336 struct drm_i915_reg_read *reg = data;
1337 struct register_whitelist const *entry = whitelist;
1338 int i;
1339
1340 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1341 if (entry->offset == reg->offset &&
1342 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1343 break;
1344 }
1345
1346 if (i == ARRAY_SIZE(whitelist))
1347 return -EINVAL;
1348
1349 switch (entry->size) {
1350 case 8:
1351 reg->val = I915_READ64(reg->offset);
1352 break;
1353 case 4:
1354 reg->val = I915_READ(reg->offset);
1355 break;
1356 case 2:
1357 reg->val = I915_READ16(reg->offset);
1358 break;
1359 case 1:
1360 reg->val = I915_READ8(reg->offset);
1361 break;
1362 default:
1363 WARN_ON(1);
1364 return -EINVAL;
1365 }
1366
1367 return 0;
1368}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 331c00b69f15..a55315a8d5a3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -391,11 +391,20 @@ struct drm_i915_display_funcs {
391 /* pll clock increase/decrease */ 391 /* pll clock increase/decrease */
392}; 392};
393 393
394struct drm_i915_gt_funcs { 394struct intel_uncore_funcs {
395 void (*force_wake_get)(struct drm_i915_private *dev_priv); 395 void (*force_wake_get)(struct drm_i915_private *dev_priv);
396 void (*force_wake_put)(struct drm_i915_private *dev_priv); 396 void (*force_wake_put)(struct drm_i915_private *dev_priv);
397}; 397};
398 398
399struct intel_uncore {
400 spinlock_t lock; /** lock is also taken in irq contexts. */
401
402 struct intel_uncore_funcs funcs;
403
404 unsigned fifo_count;
405 unsigned forcewake_count;
406};
407
399#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 408#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
400 func(is_mobile) sep \ 409 func(is_mobile) sep \
401 func(is_i85x) sep \ 410 func(is_i85x) sep \
@@ -1045,14 +1054,7 @@ typedef struct drm_i915_private {
1045 1054
1046 void __iomem *regs; 1055 void __iomem *regs;
1047 1056
1048 struct drm_i915_gt_funcs gt; 1057 struct intel_uncore uncore;
1049 /** gt_fifo_count and the subsequent register write are synchronized
1050 * with dev->struct_mutex. */
1051 unsigned gt_fifo_count;
1052 /** forcewake_count is protected by gt_lock */
1053 unsigned forcewake_count;
1054 /** gt_lock is also taken in irq contexts. */
1055 spinlock_t gt_lock;
1056 1058
1057 struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; 1059 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
1058 1060
@@ -1670,8 +1672,14 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
1670 1672
1671extern void intel_irq_init(struct drm_device *dev); 1673extern void intel_irq_init(struct drm_device *dev);
1672extern void intel_hpd_init(struct drm_device *dev); 1674extern void intel_hpd_init(struct drm_device *dev);
1673extern void intel_gt_init(struct drm_device *dev); 1675extern void intel_pm_init(struct drm_device *dev);
1674extern void intel_gt_sanitize(struct drm_device *dev); 1676
1677extern void intel_uncore_sanitize(struct drm_device *dev);
1678extern void intel_uncore_early_sanitize(struct drm_device *dev);
1679extern void intel_uncore_init(struct drm_device *dev);
1680extern void intel_uncore_reset(struct drm_device *dev);
1681extern void intel_uncore_clear_errors(struct drm_device *dev);
1682extern void intel_uncore_check_errors(struct drm_device *dev);
1675 1683
1676void 1684void
1677i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1685i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -2107,7 +2115,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2107 */ 2115 */
2108void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 2116void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
2109void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 2117void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
2110int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
2111 2118
2112int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 2119int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2113int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 2120int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f708e4efa1be..ee3e49cc0eb4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1307,11 +1307,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1307 1307
1308 /* We get interrupts on unclaimed registers, so check for this before we 1308 /* We get interrupts on unclaimed registers, so check for this before we
1309 * do any I915_{READ,WRITE}. */ 1309 * do any I915_{READ,WRITE}. */
1310 if (IS_HASWELL(dev) && 1310 intel_uncore_check_errors(dev);
1311 (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1312 DRM_ERROR("Unclaimed register before interrupt\n");
1313 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1314 }
1315 1311
1316 /* disable master interrupt before clearing iir */ 1312 /* disable master interrupt before clearing iir */
1317 de_ier = I915_READ(DEIER); 1313 de_ier = I915_READ(DEIER);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index baaefd70cc67..b3389d74d695 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10453,8 +10453,7 @@ intel_display_capture_error_state(struct drm_device *dev)
10453 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to 10453 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
10454 * prevent the next I915_WRITE from detecting it and printing an error 10454 * prevent the next I915_WRITE from detecting it and printing an error
10455 * message. */ 10455 * message. */
10456 if (HAS_POWER_WELL(dev)) 10456 intel_uncore_clear_errors(dev);
10457 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
10458 10457
10459 return error; 10458 return error;
10460} 10459}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3fbe80bc36bb..d9f50e368fe9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -806,7 +806,6 @@ extern void intel_init_power_well(struct drm_device *dev);
806extern void intel_set_power_well(struct drm_device *dev, bool enable); 806extern void intel_set_power_well(struct drm_device *dev, bool enable);
807extern void intel_enable_gt_powersave(struct drm_device *dev); 807extern void intel_enable_gt_powersave(struct drm_device *dev);
808extern void intel_disable_gt_powersave(struct drm_device *dev); 808extern void intel_disable_gt_powersave(struct drm_device *dev);
809extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
810extern void ironlake_teardown_rc6(struct drm_device *dev); 809extern void ironlake_teardown_rc6(struct drm_device *dev);
811 810
812extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 811extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 74d6c4d78360..0a5ba92a4b12 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,8 +32,6 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <drm/i915_powerwell.h> 33#include <drm/i915_powerwell.h>
34 34
35#define FORCEWAKE_ACK_TIMEOUT_MS 2
36
37/* FBC, or Frame Buffer Compression, is a technique employed to compress the 35/* FBC, or Frame Buffer Compression, is a technique employed to compress the
38 * framebuffer contents in-memory, aiming at reducing the required bandwidth 36 * framebuffer contents in-memory, aiming at reducing the required bandwidth
39 * during in-memory transfers and, therefore, reduce the power packet. 37 * during in-memory transfers and, therefore, reduce the power packet.
@@ -5289,254 +5287,6 @@ void intel_init_pm(struct drm_device *dev)
5289 } 5287 }
5290} 5288}
5291 5289
5292static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
5293{
5294 u32 gt_thread_status_mask;
5295
5296 if (IS_HASWELL(dev_priv->dev))
5297 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
5298 else
5299 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
5300
5301 /* w/a for a sporadic read returning 0 by waiting for the GT
5302 * thread to wake up.
5303 */
5304 if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
5305 DRM_ERROR("GT thread status wait timed out\n");
5306}
5307
5308static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
5309{
5310 I915_WRITE_NOTRACE(FORCEWAKE, 0);
5311 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5312}
5313
5314static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5315{
5316 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
5317 FORCEWAKE_ACK_TIMEOUT_MS))
5318 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5319
5320 I915_WRITE_NOTRACE(FORCEWAKE, 1);
5321 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5322
5323 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
5324 FORCEWAKE_ACK_TIMEOUT_MS))
5325 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5326
5327 /* WaRsForcewakeWaitTC0:snb */
5328 __gen6_gt_wait_for_thread_c0(dev_priv);
5329}
5330
5331static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
5332{
5333 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
5334 /* something from same cacheline, but !FORCEWAKE_MT */
5335 POSTING_READ(ECOBUS);
5336}
5337
5338static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
5339{
5340 u32 forcewake_ack;
5341
5342 if (IS_HASWELL(dev_priv->dev))
5343 forcewake_ack = FORCEWAKE_ACK_HSW;
5344 else
5345 forcewake_ack = FORCEWAKE_MT_ACK;
5346
5347 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
5348 FORCEWAKE_ACK_TIMEOUT_MS))
5349 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5350
5351 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5352 /* something from same cacheline, but !FORCEWAKE_MT */
5353 POSTING_READ(ECOBUS);
5354
5355 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
5356 FORCEWAKE_ACK_TIMEOUT_MS))
5357 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5358
5359 /* WaRsForcewakeWaitTC0:ivb,hsw */
5360 __gen6_gt_wait_for_thread_c0(dev_priv);
5361}
5362
5363/*
5364 * Generally this is called implicitly by the register read function. However,
5365 * if some sequence requires the GT to not power down then this function should
5366 * be called at the beginning of the sequence followed by a call to
5367 * gen6_gt_force_wake_put() at the end of the sequence.
5368 */
5369void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5370{
5371 unsigned long irqflags;
5372
5373 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5374 if (dev_priv->forcewake_count++ == 0)
5375 dev_priv->gt.force_wake_get(dev_priv);
5376 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5377}
5378
5379void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
5380{
5381 u32 gtfifodbg;
5382 gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
5383 if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
5384 "MMIO read or write has been dropped %x\n", gtfifodbg))
5385 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
5386}
5387
5388static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5389{
5390 I915_WRITE_NOTRACE(FORCEWAKE, 0);
5391 /* something from same cacheline, but !FORCEWAKE */
5392 POSTING_READ(ECOBUS);
5393 gen6_gt_check_fifodbg(dev_priv);
5394}
5395
5396static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
5397{
5398 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5399 /* something from same cacheline, but !FORCEWAKE_MT */
5400 POSTING_READ(ECOBUS);
5401 gen6_gt_check_fifodbg(dev_priv);
5402}
5403
5404/*
5405 * see gen6_gt_force_wake_get()
5406 */
5407void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5408{
5409 unsigned long irqflags;
5410
5411 spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5412 if (--dev_priv->forcewake_count == 0)
5413 dev_priv->gt.force_wake_put(dev_priv);
5414 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5415}
5416
5417int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
5418{
5419 int ret = 0;
5420
5421 if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
5422 int loop = 500;
5423 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5424 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
5425 udelay(10);
5426 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5427 }
5428 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
5429 ++ret;
5430 dev_priv->gt_fifo_count = fifo;
5431 }
5432 dev_priv->gt_fifo_count--;
5433
5434 return ret;
5435}
5436
5437static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
5438{
5439 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
5440 /* something from same cacheline, but !FORCEWAKE_VLV */
5441 POSTING_READ(FORCEWAKE_ACK_VLV);
5442}
5443
5444static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
5445{
5446 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
5447 FORCEWAKE_ACK_TIMEOUT_MS))
5448 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5449
5450 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5451 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5452 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5453
5454 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
5455 FORCEWAKE_ACK_TIMEOUT_MS))
5456 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
5457
5458 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
5459 FORCEWAKE_KERNEL),
5460 FORCEWAKE_ACK_TIMEOUT_MS))
5461 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
5462
5463 /* WaRsForcewakeWaitTC0:vlv */
5464 __gen6_gt_wait_for_thread_c0(dev_priv);
5465}
5466
5467static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
5468{
5469 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5470 I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5471 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5472 /* The below doubles as a POSTING_READ */
5473 gen6_gt_check_fifodbg(dev_priv);
5474}
5475
5476void intel_gt_sanitize(struct drm_device *dev)
5477{
5478 struct drm_i915_private *dev_priv = dev->dev_private;
5479
5480 if (IS_VALLEYVIEW(dev)) {
5481 vlv_force_wake_reset(dev_priv);
5482 } else if (INTEL_INFO(dev)->gen >= 6) {
5483 __gen6_gt_force_wake_reset(dev_priv);
5484 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5485 __gen6_gt_force_wake_mt_reset(dev_priv);
5486 }
5487
5488 /* BIOS often leaves RC6 enabled, but disable it for hw init */
5489 if (INTEL_INFO(dev)->gen >= 6)
5490 intel_disable_gt_powersave(dev);
5491}
5492
5493void intel_gt_init(struct drm_device *dev)
5494{
5495 struct drm_i915_private *dev_priv = dev->dev_private;
5496
5497 if (IS_VALLEYVIEW(dev)) {
5498 dev_priv->gt.force_wake_get = vlv_force_wake_get;
5499 dev_priv->gt.force_wake_put = vlv_force_wake_put;
5500 } else if (IS_HASWELL(dev)) {
5501 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
5502 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
5503 } else if (IS_IVYBRIDGE(dev)) {
5504 u32 ecobus;
5505
5506 /* IVB configs may use multi-threaded forcewake */
5507
5508 /* A small trick here - if the bios hasn't configured
5509 * MT forcewake, and if the device is in RC6, then
5510 * force_wake_mt_get will not wake the device and the
5511 * ECOBUS read will return zero. Which will be
5512 * (correctly) interpreted by the test below as MT
5513 * forcewake being disabled.
5514 */
5515 mutex_lock(&dev->struct_mutex);
5516 __gen6_gt_force_wake_mt_get(dev_priv);
5517 ecobus = I915_READ_NOTRACE(ECOBUS);
5518 __gen6_gt_force_wake_mt_put(dev_priv);
5519 mutex_unlock(&dev->struct_mutex);
5520
5521 if (ecobus & FORCEWAKE_MT_ENABLE) {
5522 dev_priv->gt.force_wake_get =
5523 __gen6_gt_force_wake_mt_get;
5524 dev_priv->gt.force_wake_put =
5525 __gen6_gt_force_wake_mt_put;
5526 } else {
5527 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
5528 DRM_INFO("when using vblank-synced partial screen updates.\n");
5529 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5530 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5531 }
5532 } else if (IS_GEN6(dev)) {
5533 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5534 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5535 }
5536 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5537 intel_gen6_powersave_work);
5538}
5539
5540int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) 5290int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
5541{ 5291{
5542 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5292 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5639,3 +5389,11 @@ int vlv_freq_opcode(int ddr_freq, int val)
5639 return val; 5389 return val;
5640} 5390}
5641 5391
/*
 * One-time powermanagement init: set up the delayed work item that brings
 * up gen6+ powersave state (RPS etc.) after resume/load.  Must run before
 * intel_uncore_sanitize(), which may cancel this work.
 */
void intel_pm_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
}
5399
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644
index 000000000000..97e8b1b86476
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -0,0 +1,571 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_drv.h"
26
27#define FORCEWAKE_ACK_TIMEOUT_MS 2
28
29static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
30{
31 u32 gt_thread_status_mask;
32
33 if (IS_HASWELL(dev_priv->dev))
34 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
35 else
36 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
37
38 /* w/a for a sporadic read returning 0 by waiting for the GT
39 * thread to wake up.
40 */
41 if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
42 DRM_ERROR("GT thread status wait timed out\n");
43}
44
/* Drop any pending forcewake request via the original FORCEWAKE register. */
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}
50
/*
 * Acquire forcewake via the original (pre-MT, SNB) FORCEWAKE register.
 *
 * Handshake: wait for any previous request to drain (ACK bit clear), write
 * the wake request, then wait for the hardware to ACK it.  Each wait is
 * bounded by FORCEWAKE_ACK_TIMEOUT_MS; a timeout is logged but not fatal.
 */
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */

	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
67
/* Clear all kernel wake bits in the multi-threaded FORCEWAKE_MT register. */
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);
}
74
/*
 * Acquire forcewake via the multi-threaded FORCEWAKE_MT register (IVB/HSW).
 *
 * Same drain / request / ack handshake as __gen6_gt_force_wake_get(), but
 * using the masked-bit MT interface and the per-platform ACK register
 * (Haswell has its own ACK offset).
 */
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);

	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
99
/*
 * Check GTFIFODBG for dropped MMIO accesses; WARN and clear the error bits
 * if any were recorded.  Also doubles as a posting read for callers.
 */
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;
	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
	    "MMIO read or write has been dropped %x\n", gtfifodbg))
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
108
/* Release forcewake (original interface) and check for dropped accesses. */
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	POSTING_READ(ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
116
/* Release forcewake (MT interface) and check for dropped accesses. */
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
124
/*
 * Reserve one GT FIFO entry before an MMIO write that bypasses forcewake.
 *
 * When the cached free-entry count drops into the reserved zone, re-read
 * the hardware count and poll in 10us steps (up to 500 iterations) until
 * entries are available again.  Returns non-zero if the FIFO stayed
 * exhausted (the caller then checks GTFIFODBG after its write); the cached
 * count is decremented either way to account for the pending write.
 */
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		/* loop < 0 means the poll budget ran out before entries freed up */
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
144
/* Clear all kernel wake bits in the Valleyview FORCEWAKE register. */
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	POSTING_READ(FORCEWAKE_ACK_VLV);
}
151
/*
 * Acquire forcewake on Valleyview.
 *
 * VLV has separate render (FORCEWAKE_VLV) and media (FORCEWAKE_MEDIA_VLV)
 * wells; both are requested and both ACKs are awaited, each bounded by
 * FORCEWAKE_ACK_TIMEOUT_MS.
 */
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
174
/* Release both VLV forcewake wells; the FIFO check posts the writes. */
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
183
/*
 * Early hardware sanitation: clear any stale FPGA_DBG unclaimed-register
 * flag so later unclaimed-access detection starts from a clean slate.
 * Note dev_priv is referenced implicitly by the I915_WRITE_NOTRACE macro.
 */
void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
191
/*
 * Select the per-platform forcewake get/put implementations.
 *
 * VLV and HSW are unambiguous; Ivybridge must be probed at runtime because
 * it may or may not have multi-threaded forcewake enabled by the BIOS (see
 * the ECOBUS trick below).  On gens < 6 the vfuncs are left NULL.
 */
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = I915_READ_NOTRACE(ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}
}
240
/*
 * Bring the uncore into a known state for hw init: drop any forcewake
 * references the BIOS/firmware may have left behind (both interfaces on
 * IVB/HSW, since either might have been used), then disable RC6.
 */
void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
256
257/*
258 * Generally this is called implicitly by the register read function. However,
259 * if some sequence requires the GT to not power down then this function should
260 * be called at the beginning of the sequence followed by a call to
261 * gen6_gt_force_wake_put() at the end of the sequence.
262 */
263void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
264{
265 unsigned long irqflags;
266
267 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
268 if (dev_priv->uncore.forcewake_count++ == 0)
269 dev_priv->uncore.funcs.force_wake_get(dev_priv);
270 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
271}
272
273/*
274 * see gen6_gt_force_wake_get()
275 */
276void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
277{
278 unsigned long irqflags;
279
280 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
281 if (--dev_priv->uncore.forcewake_count == 0)
282 dev_priv->uncore.funcs.force_wake_put(dev_priv);
283 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
284}
285
/* We give fast paths for the really cool registers */
/*
 * A register access needs a forcewake bracket when the platform has
 * forcewake at all, the offset is below 0x40000, and it is not the
 * FORCEWAKE register itself (which must remain reachable to perform
 * the handshake).
 */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
291
/* Gen5 RC6 wakeup: issue a harmless write before any real register access. */
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}
300
/*
 * Before a write: if FPGA_DBG already reports an unclaimed access (from
 * some earlier, unknown culprit), log it and clear the flag so the
 * post-write check below attributes new errors to this write only.
 */
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
311
/*
 * After a write: if FPGA_DBG flags an unclaimed access, the write just
 * performed hit an unclaimed register; log it and clear the flag.
 */
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
321
/*
 * Out-of-line MMIO read entry points, i915_read{8,16,32,64}().
 *
 * Under uncore.lock: gen5 first issues the RC6 dummy wakeup write; a
 * register that NEEDS_FORCE_WAKE takes a temporary forcewake reference
 * unless the caller already holds one (forcewake_count != 0).  Every read
 * is reported to the i915_reg_rw tracepoint.
 * (Comments cannot go inside the macro body: each line ends in '\'.)
 */
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	unsigned long irqflags; \
	u##x val = 0; \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
		val = read##y(dev_priv->regs + reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} else { \
		val = read##y(dev_priv->regs + reg); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read
348
/*
 * Out-of-line MMIO write entry points, i915_write{8,16,32,64}().
 *
 * Under uncore.lock: GT power-well registers first reserve a FIFO entry
 * (__gen6_gt_wait_for_fifo, checked after the write if it struggled); gen5
 * gets the RC6 dummy wakeup; HSW unclaimed-register detection brackets the
 * actual write.  The write is traced before the register is touched.
 */
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	unsigned long irqflags; \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	write##y(val, dev_priv->regs + reg); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
373
/*
 * Registers userspace may read via the i915 reg_read ioctl, with the size
 * of each read and a bitmask of the gens on which it is permitted.
 */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
381
382int i915_reg_read_ioctl(struct drm_device *dev,
383 void *data, struct drm_file *file)
384{
385 struct drm_i915_private *dev_priv = dev->dev_private;
386 struct drm_i915_reg_read *reg = data;
387 struct register_whitelist const *entry = whitelist;
388 int i;
389
390 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
391 if (entry->offset == reg->offset &&
392 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
393 break;
394 }
395
396 if (i == ARRAY_SIZE(whitelist))
397 return -EINVAL;
398
399 switch (entry->size) {
400 case 8:
401 reg->val = I915_READ64(reg->offset);
402 break;
403 case 4:
404 reg->val = I915_READ(reg->offset);
405 break;
406 case 2:
407 reg->val = I915_READ16(reg->offset);
408 break;
409 case 1:
410 reg->val = I915_READ8(reg->offset);
411 break;
412 default:
413 WARN_ON(1);
414 return -EINVAL;
415 }
416
417 return 0;
418}
419
/*
 * GPU reset for gen2 parts via D_STATE (plus DEBUG_RESET_I830 on 830/845G).
 * Not supported on 85x (-ENODEV).  The msleep()s give the hardware time to
 * latch the reset before it is deasserted.
 */
static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
449
450static int i965_reset_complete(struct drm_device *dev)
451{
452 u8 gdrst;
453 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
454 return (gdrst & GRDOM_RESET_ENABLE) == 0;
455}
456
/*
 * GPU reset for gen4 via the GDRST byte in PCI config space: reset the
 * render domain, then the media domain, waiting (up to 500ms each) for the
 * hardware to clear the reset bit, and finally clear GDRST.
 */
static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}
484
/*
 * GPU reset for gen5 via the ILK_GDSR register in the MCHBAR mirror:
 * render domain first, then media, polling the self-clearing reset bit
 * (bit 0) for up to 500ms per domain.
 */
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
506
/*
 * Full chip reset for gen6/7 via GEN6_GDRST.  The reset clobbers the
 * hardware forcewake state, so after it completes the sequence either
 * re-acquires forcewake (if any kernel reference was outstanding) or
 * explicitly turns it off, and then resyncs the cached FIFO count.
 */
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long irqflags;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
}
541
542int intel_gpu_reset(struct drm_device *dev)
543{
544 switch (INTEL_INFO(dev)->gen) {
545 case 7:
546 case 6: return gen6_do_reset(dev);
547 case 5: return ironlake_do_reset(dev);
548 case 4: return i965_do_reset(dev);
549 case 2: return i8xx_do_reset(dev);
550 default: return -ENODEV;
551 }
552}
553
/*
 * Clear any latched FPGA_DBG unclaimed-register flag without reporting it.
 * Note dev_priv is referenced implicitly by the I915_WRITE_NOTRACE macro.
 */
void intel_uncore_clear_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
561
/*
 * Report (and clear) a latched unclaimed-register error; per the message
 * text this is intended to run at interrupt time to catch errors that
 * occurred before the interrupt.
 */
void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}