author    Ben Widawsky <benjamin.widawsky@intel.com>  2013-11-03 00:07:09 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>      2013-11-08 12:09:39 -0500
commit    abd58f0175915bed644aa67c8f69dc571b8280e0 (patch)
tree      28d4ee3ac78111b72ced12f285ddac1441e236db
parent    9459d252378aea80d28dc12bfec9a0d31b2a61bf (diff)
drm/i915/bdw: Implement interrupt changes
The interrupt handling implementation remains the same as on previous generations, with the four types of registers: status, identity, mask, and enable. However, the layout of where the bits go has changed entirely. To address these changes, all of the interrupt vfuncs needed special gen8 code.

The way it works is that there is now a top-level status register which informs the interrupt service routine which unit caused the interrupt, and therefore which interrupt registers to read to process the interrupt. For display the division is quite logical: a set of interrupt registers for each pipe, and in addition to those, a set each for "misc" and port.

For GT things get a bit hairy, as seen in the code. Each of the GT units has its own bits defined. They all look *very similar* and reside in 16 bits of a GT register. As an example, RCS and BCS share register 0. To compact the code a bit, at a slight expense to complexity, this is exactly how the code works as well. Two structures are added to the ring buffer so that our ring buffer interrupt handling code knows which ring shares the interrupt registers, and a shift value (i.e. the top or bottom 16 bits of the register).

The above allows us to keep the interrupt register caching scheme, the per-interrupt enables, and the code to mask and unmask interrupts relatively clean (again at the cost of some more complexity).

Most of the GT units mentioned above are command streamers, and so the symmetry should work quite well for even the yet-to-be-implemented rings which Broadwell adds.

v2: Fixes up a couple of bugs, and is more verbose about errors in the Broadwell interrupt handler.

v3: Fix the DE_MISC IER offset.

v4: Simplify interrupts: I totally misread the docs the first time I implemented interrupts, so this should greatly simplify the mess. Unlike GEN6, we never touch the regular mask registers in irq_get/put.

v5: Rebased on top of the recent PCH hotplug setup changes.

v6: Fixup on top of moving num_pipes to intel_info.

v7: Rebased on top of Egbert Eich's hpd irq handling rework. Also wired up ibx_hpd_irq_setup for gen8.

v8: Rebase on top of Jani's asle handling rework.

v9: Rebase on top of Ben's VECS enabling for Haswell, where he unfortunately went OCD on the gt irq #defines. Note that they're still not yet fully consistent:
- Used the GT_RENDER_ #defines + bdw shifts.
- Dropped the shift from the L3_PARITY stuff, seemed clearer.
- s/irq_refcount/irq_refcount.gt/

v10: Squash in the VECS enabling patches and the gen8_gt_irq_handler refactoring from Zhao Yakui <yakui.zhao@intel.com>

v11: Rebase on top of the interrupt cleanups in upstream.

v12: Rebase on top of Ben's DPF changes in upstream.

v13: Drop bdw from the HAS_L3_DPF feature flag for now, it's unclear what exactly needs to be done. Requested by Ben.

v14: Fix the patch.
- Drop the mask of reserved bits and assorted logic, it doesn't match the spec.
- Do the posting read unconditionally instead of commenting it out.
- Add a GEN8_MASTER_IRQ_CONTROL definition and use it.
- Fix up the GEN8_PIPE interrupt defines and give them GEN8_ prefixes - we actually will need to use them.
- Enclose macros in do {} while (0) (checkpatch).
- Clear DE_MISC interrupt bits only after having processed them.
- Fix whitespace fail (checkpatch).
- Fix overly long lines where appropriate (checkpatch).
- Don't use typedef'ed private_t (maintainer-scripts).
- Align the function parameter list correctly.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net> (v4)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
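[Editor's note, not part of the patch: the GT sharing scheme described above can be illustrated in isolation. The following is a minimal, hypothetical stand-alone C sketch of how two rings can share one 32-bit interrupt register by each claiming a 16-bit window selected by a per-ring shift, which is the idea behind the new per-ring shift values and gen8_gt_irq_handler(). The names struct fake_ring, decode_shared_iir() and USER_INTERRUPT_BIT are invented for the example and do not exist in the driver.]

#include <stdint.h>
#include <stdio.h>

/* Stand-in for GT_RENDER_USER_INTERRUPT: bit 0 within a ring's 16-bit window. */
#define USER_INTERRUPT_BIT (1 << 0)

/* Hypothetical ring descriptor: a name plus the shift selecting its window. */
struct fake_ring {
	const char *name;
	unsigned int irq_shift;	/* 0 = low 16 bits, 16 = high 16 bits */
};

/* Decode one shared IIR value for the two rings that live in it. */
static void decode_shared_iir(uint32_t iir, const struct fake_ring rings[2])
{
	int i;

	for (i = 0; i < 2; i++) {
		/* Each ring only looks at its own 16-bit slice of the register. */
		uint32_t bits = (iir >> rings[i].irq_shift) & 0xffff;

		if (bits & USER_INTERRUPT_BIT)
			printf("user interrupt on %s\n", rings[i].name);
	}
}

int main(void)
{
	/* RCS and BCS share register 0: render in the low half, blitter in the high half. */
	const struct fake_ring gt0[2] = {
		{ "render (RCS)",  0 },
		{ "blitter (BCS)", 16 },
	};

	/* Pretend the hardware latched a blitter user interrupt only. */
	decode_shared_iir(USER_INTERRUPT_BIT << 16, gt0);
	return 0;
}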
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          | 321
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          |  68
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  |  96
4 files changed, 478 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5a29dd3898aa..12cc0c51c73d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1317,7 +1317,10 @@ typedef struct drm_i915_private {
 	struct mutex dpio_lock;
 
 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 irq_mask;
+	union {
+		u32 irq_mask;
+		u32 de_irq_mask[I915_MAX_PIPES];
+	};
 	u32 gt_irq_mask;
 	u32 pm_irq_mask;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a228176676b2..54338cf72feb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1119,6 +1119,56 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 		ivybridge_parity_error_irq_handler(dev, gt_iir);
 }
 
+static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
+				       struct drm_i915_private *dev_priv,
+				       u32 master_ctl)
+{
+	u32 rcs, bcs, vcs;
+	uint32_t tmp = 0;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+		tmp = I915_READ(GEN8_GT_IIR(0));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
+			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			if (rcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[RCS]);
+			if (bcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[BCS]);
+			I915_WRITE(GEN8_GT_IIR(0), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT0)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VCS1_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(1));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VCS]);
+			I915_WRITE(GEN8_GT_IIR(1), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT1)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VECS_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(3));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VECS]);
+			I915_WRITE(GEN8_GT_IIR(3), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT3)!\n");
+	}
+
+	return ret;
+}
+
 #define HPD_STORM_DETECT_PERIOD 1000
 #define HPD_STORM_THRESHOLD 5
 
@@ -1692,6 +1742,75 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	return ret;
 }
 
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl;
+	irqreturn_t ret = IRQ_NONE;
+	uint32_t tmp = 0;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	master_ctl = I915_READ(GEN8_MASTER_IRQ);
+	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+	if (master_ctl & GEN8_DE_MISC_IRQ) {
+		tmp = I915_READ(GEN8_DE_MISC_IIR);
+		if (tmp & GEN8_DE_MISC_GSE)
+			intel_opregion_asle_intr(dev);
+		else if (tmp)
+			DRM_ERROR("Unexpected DE Misc interrupt\n");
+		else
+			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+
+		if (tmp) {
+			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	if (master_ctl & GEN8_DE_IRQS) {
+		int de_ret = 0;
+		int pipe;
+		for_each_pipe(pipe) {
+			uint32_t pipe_iir;
+
+			pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+			if (pipe_iir & GEN8_PIPE_VBLANK)
+				drm_handle_vblank(dev, pipe);
+
+			if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
+				intel_prepare_page_flip(dev, pipe);
+				intel_finish_page_flip_plane(dev, pipe);
+			}
+
+			if (pipe_iir & GEN8_DE_PIPE_IRQ_ERRORS)
+				DRM_ERROR("Errors on pipe %c\n", 'A' + pipe);
+
+			if (pipe_iir) {
+				de_ret++;
+				ret = IRQ_HANDLED;
+				I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+			}
+		}
+		if (!de_ret)
+			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+	}
+
+	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return ret;
+}
+
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 			       bool reset_completed)
 {
@@ -2045,6 +2164,25 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 	return 0;
 }
 
+static int gen8_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	uint32_t imr;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	imr = I915_READ(GEN8_DE_PIPE_IMR(pipe));
+	if ((imr & GEN8_PIPE_VBLANK) == 1) {
+		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), imr & ~GEN8_PIPE_VBLANK);
+		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	return 0;
+}
+
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -2093,6 +2231,24 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
+static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	uint32_t imr;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	imr = I915_READ(GEN8_DE_PIPE_IMR(pipe));
+	if ((imr & GEN8_PIPE_VBLANK) == 0) {
+		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), imr | GEN8_PIPE_VBLANK);
+		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
 static u32
 ring_last_seqno(struct intel_ring_buffer *ring)
 {
@@ -2427,6 +2583,53 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
 	POSTING_READ(VLV_IER);
 }
 
+static void gen8_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	/* IIR can theoretically queue up two events. Be paranoid */
+#define GEN8_IRQ_INIT_NDX(type, which) do { \
+		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IMR(which)); \
+		I915_WRITE(GEN8_##type##_IER(which), 0); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IIR(which)); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	} while (0)
+
+#define GEN8_IRQ_INIT(type) do { \
+		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IMR); \
+		I915_WRITE(GEN8_##type##_IER, 0); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IIR); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+	} while (0)
+
+	GEN8_IRQ_INIT_NDX(GT, 0);
+	GEN8_IRQ_INIT_NDX(GT, 1);
+	GEN8_IRQ_INIT_NDX(GT, 2);
+	GEN8_IRQ_INIT_NDX(GT, 3);
+
+	for_each_pipe(pipe) {
+		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
+	}
+
+	GEN8_IRQ_INIT(DE_PORT);
+	GEN8_IRQ_INIT(DE_MISC);
+	GEN8_IRQ_INIT(PCU);
+#undef GEN8_IRQ_INIT
+#undef GEN8_IRQ_INIT_NDX
+
+	POSTING_READ(GEN8_PCU_IIR);
+}
+
 static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2632,6 +2835,116 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
+static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	int i;
+
+	/* These are interrupts we'll toggle with the ring mask register */
+	uint32_t gt_interrupts[] = {
+		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+		0,
+		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+		};
+
+	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
+		u32 tmp = I915_READ(GEN8_GT_IIR(i));
+		if (tmp)
+			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
+				  i, tmp);
+		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
+		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
+	}
+	POSTING_READ(GEN8_GT_IER(0));
+}
+
+static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t de_pipe_enables = GEN8_PIPE_FLIP_DONE |
+		GEN8_PIPE_SCAN_LINE_EVENT |
+		GEN8_PIPE_VBLANK |
+		GEN8_DE_PIPE_IRQ_ERRORS;
+	int pipe;
+	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_enables;
+	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_enables;
+	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_enables;
+
+	for_each_pipe(pipe) {
+		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (tmp)
+			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
+				  pipe, tmp);
+		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
+	}
+	POSTING_READ(GEN8_DE_PIPE_ISR(0));
+
+	I915_WRITE(GEN8_DE_PORT_IMR, ~_PORT_DP_A_HOTPLUG);
+	I915_WRITE(GEN8_DE_PORT_IER, _PORT_DP_A_HOTPLUG);
+	POSTING_READ(GEN8_DE_PORT_IER);
+}
+
+static int gen8_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	gen8_gt_irq_postinstall(dev_priv);
+	gen8_de_irq_postinstall(dev_priv);
+
+	ibx_irq_postinstall(dev);
+
+	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return 0;
+}
+
+static void gen8_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+#define GEN8_IRQ_FINI_NDX(type, which) do { \
+		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+		I915_WRITE(GEN8_##type##_IER(which), 0); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	} while (0)
+
+#define GEN8_IRQ_FINI(type) do { \
+		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+		I915_WRITE(GEN8_##type##_IER, 0); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+	} while (0)
+
+	GEN8_IRQ_FINI_NDX(GT, 0);
+	GEN8_IRQ_FINI_NDX(GT, 1);
+	GEN8_IRQ_FINI_NDX(GT, 2);
+	GEN8_IRQ_FINI_NDX(GT, 3);
+
+	for_each_pipe(pipe) {
+		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
+	}
+
+	GEN8_IRQ_FINI(DE_PORT);
+	GEN8_IRQ_FINI(DE_MISC);
+	GEN8_IRQ_FINI(PCU);
+#undef GEN8_IRQ_FINI
+#undef GEN8_IRQ_FINI_NDX
+
+	POSTING_READ(GEN8_PCU_IIR);
+}
+
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -3411,6 +3724,14 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->enable_vblank = valleyview_enable_vblank;
 		dev->driver->disable_vblank = valleyview_disable_vblank;
 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (IS_GEN8(dev)) {
+		dev->driver->irq_handler = gen8_irq_handler;
+		dev->driver->irq_preinstall = gen8_irq_preinstall;
+		dev->driver->irq_postinstall = gen8_irq_postinstall;
+		dev->driver->irq_uninstall = gen8_irq_uninstall;
+		dev->driver->enable_vblank = gen8_enable_vblank;
+		dev->driver->disable_vblank = gen8_disable_vblank;
+		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev->driver->irq_handler = ironlake_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 48c3aef5acaa..9ec524167c83 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4002,6 +4002,74 @@
 #define GTIIR	0x44018
 #define GTIER	0x4401c
 
+#define GEN8_MASTER_IRQ			0x44200
+#define GEN8_MASTER_IRQ_CONTROL		(1<<31)
+#define GEN8_PCU_IRQ			(1<<30)
+#define GEN8_DE_PCH_IRQ			(1<<23)
+#define GEN8_DE_MISC_IRQ		(1<<22)
+#define GEN8_DE_PORT_IRQ		(1<<20)
+#define GEN8_DE_PIPE_C_IRQ		(1<<18)
+#define GEN8_DE_PIPE_B_IRQ		(1<<17)
+#define GEN8_DE_PIPE_A_IRQ		(1<<16)
+#define GEN8_GT_VECS_IRQ		(1<<6)
+#define GEN8_GT_VCS2_IRQ		(1<<3)
+#define GEN8_GT_VCS1_IRQ		(1<<2)
+#define GEN8_GT_BCS_IRQ			(1<<1)
+#define GEN8_GT_RCS_IRQ			(1<<0)
+/* Lazy definition */
+#define GEN8_GT_IRQS			0x000000ff
+#define GEN8_DE_IRQS			0x01ff0000
+#define GEN8_RSVD_IRQS			0xB700ff00
+
+#define GEN8_GT_ISR(which)		(0x44300 + (0x10 * (which)))
+#define GEN8_GT_IMR(which)		(0x44304 + (0x10 * (which)))
+#define GEN8_GT_IIR(which)		(0x44308 + (0x10 * (which)))
+#define GEN8_GT_IER(which)		(0x4430c + (0x10 * (which)))
+
+#define GEN8_BCS_IRQ_SHIFT		16
+#define GEN8_RCS_IRQ_SHIFT		0
+#define GEN8_VCS2_IRQ_SHIFT		16
+#define GEN8_VCS1_IRQ_SHIFT		0
+#define GEN8_VECS_IRQ_SHIFT		0
+
+#define GEN8_DE_PIPE_ISR(pipe)		(0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe)		(0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe)		(0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe)		(0x4440c + (0x10 * (pipe)))
+#define GEN8_PIPE_UNDERRUN		(1 << 31)
+#define GEN8_PIPE_CDCLK_CRC_ERROR	(1 << 29)
+#define GEN8_PIPE_CDCLK_CRC_DONE	(1 << 28)
+#define GEN8_PIPE_CURSOR_FAULT		(1 << 10)
+#define GEN8_PIPE_SPRITE_FAULT		(1 << 9)
+#define GEN8_PIPE_PRIMARY_FAULT		(1 << 8)
+#define GEN8_PIPE_SPRITE_FLIP_DONE	(1 << 5)
+#define GEN8_PIPE_FLIP_DONE		(1 << 4)
+#define GEN8_PIPE_SCAN_LINE_EVENT	(1 << 2)
+#define GEN8_PIPE_VSYNC			(1 << 1)
+#define GEN8_PIPE_VBLANK		(1 << 0)
+#define GEN8_DE_PIPE_IRQ_ERRORS		(GEN8_PIPE_UNDERRUN | \
+					 GEN8_PIPE_CDCLK_CRC_ERROR | \
+					 GEN8_PIPE_CURSOR_FAULT | \
+					 GEN8_PIPE_SPRITE_FAULT | \
+					 GEN8_PIPE_PRIMARY_FAULT)
+
+#define GEN8_DE_PORT_ISR		0x44440
+#define GEN8_DE_PORT_IMR		0x44444
+#define GEN8_DE_PORT_IIR		0x44448
+#define GEN8_DE_PORT_IER		0x4444c
+#define _PORT_DP_A_HOTPLUG		(1 << 3)
+
+#define GEN8_DE_MISC_ISR		0x44460
+#define GEN8_DE_MISC_IMR		0x44464
+#define GEN8_DE_MISC_IIR		0x44468
+#define GEN8_DE_MISC_IER		0x4446c
+#define GEN8_DE_MISC_GSE		(1 << 27)
+
+#define GEN8_PCU_ISR			0x444e0
+#define GEN8_PCU_IMR			0x444e4
+#define GEN8_PCU_IIR			0x444e8
+#define GEN8_PCU_IER			0x444ec
+
 #define ILK_DISPLAY_CHICKEN2	0x42004
 /* Required on all Ironlake and Sandybridge according to the B-Spec. */
 #define   ILK_ELPIN_409_SELECT	(1 << 25)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2dec134f75eb..2fda12607b78 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1066,6 +1066,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
+static bool
+gen8_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+		} else {
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		} else {
+			I915_WRITE_IMR(ring, ~0);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			 u32 offset, u32 length,
@@ -1732,8 +1778,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+		} else {
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+		}
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
@@ -1897,9 +1948,16 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->irq_enable_mask =
+				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+		} else {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+		}
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
@@ -1946,9 +2004,16 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-	ring->irq_get = gen6_ring_get_irq;
-	ring->irq_put = gen6_ring_put_irq;
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+	} else {
+		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+	}
 	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
@@ -1978,10 +2043,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-	ring->irq_get = hsw_vebox_get_irq;
-	ring->irq_put = hsw_vebox_put_irq;
 	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT) |
+			GT_RENDER_CS_MASTER_ERROR_INTERRUPT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+	} else {
+		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		ring->irq_get = hsw_vebox_get_irq;
+		ring->irq_put = hsw_vebox_put_irq;
+	}
 	ring->sync_to = gen6_ring_sync;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;