Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/agp/intel-agp.h           |  1
-rw-r--r--  drivers/char/agp/intel-gtt.c           | 56
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c        |  6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c   | 93
5 files changed, 126 insertions(+), 46 deletions(-)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfeade11..5feebe2800e9 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
 #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d3265adb..0d09b537bb9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
        u32 __iomem *gtt; /* I915G */
        bool clear_fake_agp; /* on first access via agp, fill with scratch */
        int num_dcache_entries;
-       union {
-               void __iomem *i9xx_flush_page;
-               void *i8xx_flush_page;
-       };
+       void __iomem *i9xx_flush_page;
        char *i81x_gtt_table;
-       struct page *i8xx_page;
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
 
 static void i830_cleanup(void)
 {
-       if (intel_private.i8xx_flush_page) {
-               kunmap(intel_private.i8xx_flush_page);
-               intel_private.i8xx_flush_page = NULL;
-       }
-
-       __free_page(intel_private.i8xx_page);
-       intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-       /* return if we've already set the flush mechanism up */
-       if (intel_private.i8xx_page)
-               return;
-
-       intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-       if (!intel_private.i8xx_page)
-               return;
-
-       intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-       if (!intel_private.i8xx_flush_page)
-               i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
  */
 static void i830_chipset_flush(void)
 {
-       unsigned int *pg = intel_private.i8xx_flush_page;
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+       /* Forcibly evict everything from the CPU write buffers.
+        * clflush appears to be insufficient.
+        */
+       wbinvd_on_all_cpus();
+
+       /* Now we've only seen documents for this magic bit on 855GM,
+        * we hope it exists for the other gen2 chipsets...
+        *
+        * Also works as advertised on my 845G.
+        */
+       writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+              intel_private.registers+I830_HIC);
 
-       memset(pg, 0, 1024);
+       while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+               if (time_after(jiffies, timeout))
+                       break;
 
-       if (cpu_has_clflush)
-               clflush_cache_range(pg, 1024);
-       else if (wbinvd_on_all_cpus() != 0)
-               printk(KERN_ERR "Timed out waiting for cache flush.\n");
+               udelay(50);
+       }
 }
 
 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
 
        intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-       intel_i830_setup_flush();
-
        return 0;
 }
 
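The rewritten i830_chipset_flush() above drops the old kmap'd flush page entirely: it now forces CPU write buffers out with wbinvd on all CPUs, sets bit 31 of the I830_HIC register, and waits for the GMCH to clear it, bounded by a one-second timeout. As a minimal sketch of that bounded-poll idiom (the helper name and signature below are hypothetical, not something this patch adds):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Hypothetical helper, for illustration only: poll @reg until the hardware
 * clears @bit, giving up after @timeout_ms. Mirrors the loop added to
 * i830_chipset_flush() above. */
static bool gmch_wait_for_bit_clear(void __iomem *reg, u32 bit,
                                    unsigned int timeout_ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

        while (readl(reg) & bit) {
                if (time_after(jiffies, timeout))
                        return false;   /* flush never acknowledged */
                udelay(50);
        }

        return true;
}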
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..79a04fde69b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
-       int tile_width;
+       int tile_width, tile_height;
 
        /* Linear is always fine */
        if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
                }
        }
 
+       if (IS_GEN2(dev) ||
+           (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+               tile_height = 32;
+       else
+               tile_height = 8;
+       /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
+        * number of tile rows. */
+       if (IS_GEN2(dev))
+               tile_height *= 2;
+
+       /* Size needs to be aligned to a full tile row */
+       if (size & (tile_height * stride - 1))
+               return false;
+
        /* 965+ just needs multiples of tile width */
        if (INTEL_INFO(dev)->gen >= 4) {
                if (stride & (tile_width - 1))
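The new check in i915_tiling_ok() requires a tiled object to cover a whole number of tile rows: tile height is 32 rows on gen2 and for 128-byte Y tiles, 8 rows otherwise, and gen2 additionally needs an even number of rows because of its interleaved tile layout. A hedged sketch of the resulting alignment rule (tile_row_bytes() is a hypothetical illustration, not part of the patch, and assumes the stride already passed the existing width and power-of-two checks):

#include <stdbool.h>

/* Hypothetical illustration of the size rule: the object size must be a
 * multiple of one full tile row, i.e. tile_height * stride bytes. */
static unsigned int tile_row_bytes(bool is_gen2, bool has_128b_y_tiling,
                                   bool is_y_tiled, unsigned int stride)
{
        unsigned int tile_height;

        if (is_gen2 || (is_y_tiled && has_128b_y_tiling))
                tile_height = 32;
        else
                tile_height = 8;

        if (is_gen2)            /* two interleaved rows of tiles on i8xx */
                tile_height *= 2;

        return tile_height * stride;
}

/* Example: an X-tiled buffer with a 2048-byte stride on gen3 must now be a
 * multiple of 8 * 2048 = 16384 bytes, or i915_tiling_ok() rejects it. */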
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
 
+       DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        } else {
                hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                               SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-               hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-               I915_WRITE(FDI_RXA_IMR, 0);
-               I915_WRITE(FDI_RXB_IMR, 0);
+               hotplug_mask |= SDE_AUX_MASK;
        }
 
        dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b006536b3d2..9ca1bb2554fc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
                wait_event(dev_priv->pending_flip_queue,
+                          atomic_read(&dev_priv->mm.wedged) ||
                           atomic_read(&obj->pending_flip) == 0);
 
                /* Big Hammer, we also need to ensure that any pending
                 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
                 * current scanout is retired before unpinning the old
                 * framebuffer.
+                *
+                * This should only fail upon a hung GPU, in which case we
+                * can safely continue.
                 */
                ret = i915_gem_object_flush_gpu(obj, false);
-               if (ret) {
-                       i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               (void) ret;
        }
 
        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
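The intel_pipe_set_base() hunk above stops a hung GPU from stalling modesets: the pending-flip wait now also completes once the GPU has been marked wedged, and a failing i915_gem_object_flush_gpu(), which is expected only when the GPU is already hung, no longer aborts the base change. The condition being waited on amounts to the following; the helper below is a hypothetical restatement for clarity, not code added by the patch:

/* Hypothetical predicate sketch: the wait finishes either when every flip
 * pending on the old framebuffer has retired or when the GPU is wedged, so
 * a dead GPU can no longer wedge the modeset path as well. */
static bool flips_done_or_gpu_wedged(struct drm_i915_private *dev_priv,
                                     struct drm_i915_gem_object *obj)
{
        return atomic_read(&dev_priv->mm.wedged) ||
               atomic_read(&obj->pending_flip) == 0;
}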
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
                           atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+
+       /*
+        * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+        * must be driven by its own crtc; no sharing is possible.
+        */
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               if (encoder->base.crtc != crtc)
+                       continue;
+
+               switch (encoder->type) {
+               case INTEL_OUTPUT_EDP:
+                       if (!intel_encoder_is_pch_edp(&encoder->base))
+                               return false;
+                       continue;
+               }
+       }
+
+       return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 reg, temp;
+       bool is_pch_port = false;
 
        if (intel_crtc->active)
                return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
        }
 
-       ironlake_fdi_enable(crtc);
+       is_pch_port = intel_crtc_driving_pch(crtc);
+
+       if (is_pch_port)
+               ironlake_fdi_enable(crtc);
+       else {
+               /* disable CPU FDI tx and PCH FDI rx */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+               POSTING_READ(reg);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~(0x7 << 16);
+               temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+               I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+               POSTING_READ(reg);
+               udelay(100);
+
+               /* Ironlake workaround, disable clock pointer after downing FDI */
+               if (HAS_PCH_IBX(dev))
+                       I915_WRITE(FDI_RX_CHICKEN(pipe),
+                                  I915_READ(FDI_RX_CHICKEN(pipe) &
+                                            ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+               /* still set train pattern 1 */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_PATTERN_1;
+               I915_WRITE(reg, temp);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               if (HAS_PCH_CPT(dev)) {
+                       temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+               } else {
+                       temp &= ~FDI_LINK_TRAIN_NONE;
+                       temp |= FDI_LINK_TRAIN_PATTERN_1;
+               }
+               /* BPC in FDI rx is consistent with that in PIPECONF */
+               temp &= ~(0x07 << 16);
+               temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+               I915_WRITE(reg, temp);
+
+               POSTING_READ(reg);
+               udelay(100);
+       }
 
        /* Enable panel fitting for LVDS */
        if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                intel_flush_display_plane(dev, plane);
        }
 
+       /* Skip the PCH stuff if possible */
+       if (!is_pch_port)
+               goto done;
+
        /* For PCH output, training FDI link */
        if (IS_GEN6(dev))
                gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        I915_WRITE(reg, temp | TRANS_ENABLE);
        if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
                DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
        intel_crtc_load_lut(crtc);
        intel_update_fbc(dev);
        intel_crtc_update_cursor(crtc, true);
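Taken together, the intel_display.c hunks make ironlake_crtc_enable() skip all FDI and PCH transcoder programming when the pipe drives CPU eDP (DP_A) rather than a PCH port, tearing the FDI link down instead. A condensed, hypothetical outline of the resulting flow (elided steps are unchanged from the existing function; this is a sketch, not the patched code):

/* Hypothetical outline of ironlake_crtc_enable() after this patch. */
static void ironlake_crtc_enable_outline(struct drm_crtc *crtc)
{
        bool is_pch_port = intel_crtc_driving_pch(crtc);

        if (is_pch_port)
                ironlake_fdi_enable(crtc);
        /* else: CPU FDI tx and PCH FDI rx stay disabled (CPU eDP case) */

        /* ... panel fitter, PLLs, pipe and plane enable ... */

        if (!is_pch_port)
                goto done;

        /* ... FDI link training and PCH transcoder enable ... */

done:
        intel_crtc_load_lut(crtc);
        /* ... FBC and cursor updates ... */
}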