author     Rodrigo Vivi <rodrigo.vivi@intel.com>	2014-12-08 11:09:10 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>	2014-12-10 11:47:20 -0500
commit     7ff0ebcc1e30e3216c8c62ee71f59ac830b10364
tree       f87cf3981ac495f0af773a4a9a0a372980dc4bfd /drivers/gpu
parent     15a17aae5f803551981a7acc6a4058b247a7452c
drm/i915: Move FBC stuff to intel_fbc.c
No functional changes. This is just the beginning of a FBC rework.

v2 (Paulo):
 - Revert intel_fbc_init() changed parameter.
 - Revert set_no_fbc_reason() rename.
 - Rebase.

Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/Makefile          1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        3
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c    2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  22
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h       9
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c     675
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c      651
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c    4
8 files changed, 698 insertions, 669 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e4083e41a600..3cf70a61b44f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -47,6 +47,7 @@ i915-y += intel_renderstate_gen6.o \
 i915-y += intel_audio.o \
 	  intel_bios.o \
 	  intel_display.o \
+	  intel_fbc.o \
 	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
 	  intel_modes.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 95dfa2dd35b9..c74dc946cbf6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2963,9 +2963,6 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 bool force_restore);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
-extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index dfe661743398..1e4999dd3ed5 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -264,7 +264,7 @@ static void i915_restore_display(struct drm_device *dev)
 	}
 
 	/* only restore FBC info on the platform that supports FBC*/
-	intel_disable_fbc(dev);
+	intel_fbc_disable(dev);
 
 	/* restore FBC interval */
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d5153a4f90fe..841af6c1f50b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4201,7 +4201,7 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 	hsw_enable_ips(intel_crtc);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
+	intel_fbc_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	/*
@@ -4223,7 +4223,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	intel_crtc_wait_for_pending_flips(crtc);
 
 	if (dev_priv->fbc.plane == plane)
-		intel_disable_fbc(dev);
+		intel_fbc_disable(dev);
 
 	hsw_disable_ips(intel_crtc);
 
@@ -4527,7 +4527,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
+	intel_fbc_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -4584,7 +4584,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
+	intel_fbc_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -5189,7 +5189,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
+	intel_fbc_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -8950,7 +8950,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 
-	intel_update_fbc(dev);
+	intel_fbc_update(dev);
 
 	if (work->flip_queued_req)
 		i915_gem_request_assign(&work->flip_queued_req, NULL);
@@ -9747,7 +9747,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	i915_gem_track_fb(work->old_fb_obj, obj,
 			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 
-	intel_disable_fbc(dev);
+	intel_fbc_disable(dev);
 	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 	mutex_unlock(&dev->struct_mutex);
 
@@ -11816,7 +11816,7 @@ intel_commit_primary_plane(struct drm_plane *plane,
 	    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
 	    dev_priv->fbc.plane == intel_crtc->plane &&
 	    intel_plane->rotation != BIT(DRM_ROTATE_0)) {
-		intel_disable_fbc(dev);
+		intel_fbc_disable(dev);
 	}
 
 	if (state->visible) {
@@ -11851,7 +11851,7 @@ intel_commit_primary_plane(struct drm_plane *plane,
 		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 
 		mutex_lock(&dev->struct_mutex);
-		intel_update_fbc(dev);
+		intel_fbc_update(dev);
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
@@ -13050,7 +13050,7 @@ void intel_modeset_init(struct drm_device *dev)
 	intel_setup_outputs(dev);
 
 	/* Just in case the BIOS is doing something questionable. */
-	intel_disable_fbc(dev);
+	intel_fbc_disable(dev);
 
 	drm_modeset_lock_all(dev);
 	intel_modeset_setup_hw_state(dev, false);
@@ -13567,7 +13567,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	intel_unregister_dsm_handler();
 
-	intel_disable_fbc(dev);
+	intel_fbc_disable(dev);
 
 	ironlake_teardown_rc6(dev);
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 61a88fa69978..588b618ab668 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1063,6 +1063,13 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 }
 #endif
 
+/* intel_fbc.c */
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_fbc_update(struct drm_device *dev);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_disable(struct drm_device *dev);
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1169,8 +1176,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 			    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
 void intel_pm_setup(struct drm_device *dev);
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
 void intel_init_gt_powersave(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
new file mode 100644
index 000000000000..f1eeb86a3d1b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -0,0 +1,675 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include "intel_drv.h"
25#include "i915_drv.h"
26
27/* FBC, or Frame Buffer Compression, is a technique employed to compress the
28 * framebuffer contents in-memory, aiming at reducing the required bandwidth
29 * during in-memory transfers and, therefore, reduce the power packet.
30 *
31 * The benefits of FBC are mostly visible with solid backgrounds and
32 * variation-less patterns.
33 *
34 * FBC-related functionality can be enabled by the means of the
35 * i915.i915_fbc_enable parameter
36 */
37
38static void i8xx_fbc_disable(struct drm_device *dev)
39{
40 struct drm_i915_private *dev_priv = dev->dev_private;
41 u32 fbc_ctl;
42
43 dev_priv->fbc.enabled = false;
44
45 /* Disable compression */
46 fbc_ctl = I915_READ(FBC_CONTROL);
47 if ((fbc_ctl & FBC_CTL_EN) == 0)
48 return;
49
50 fbc_ctl &= ~FBC_CTL_EN;
51 I915_WRITE(FBC_CONTROL, fbc_ctl);
52
53 /* Wait for compressing bit to clear */
54 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
55 DRM_DEBUG_KMS("FBC idle timed out\n");
56 return;
57 }
58
59 DRM_DEBUG_KMS("disabled FBC\n");
60}
61
62static void i8xx_fbc_enable(struct drm_crtc *crtc)
63{
64 struct drm_device *dev = crtc->dev;
65 struct drm_i915_private *dev_priv = dev->dev_private;
66 struct drm_framebuffer *fb = crtc->primary->fb;
67 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
68 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
69 int cfb_pitch;
70 int i;
71 u32 fbc_ctl;
72
73 dev_priv->fbc.enabled = true;
74
75 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
76 if (fb->pitches[0] < cfb_pitch)
77 cfb_pitch = fb->pitches[0];
78
79 /* FBC_CTL wants 32B or 64B units */
80 if (IS_GEN2(dev))
81 cfb_pitch = (cfb_pitch / 32) - 1;
82 else
83 cfb_pitch = (cfb_pitch / 64) - 1;
84
85 /* Clear old tags */
86 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
87 I915_WRITE(FBC_TAG + (i * 4), 0);
88
89 if (IS_GEN4(dev)) {
90 u32 fbc_ctl2;
91
92 /* Set it up... */
93 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
94 fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
95 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
96 I915_WRITE(FBC_FENCE_OFF, crtc->y);
97 }
98
99 /* enable it... */
100 fbc_ctl = I915_READ(FBC_CONTROL);
101 fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
102 fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
103 if (IS_I945GM(dev))
104 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
105 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
106 fbc_ctl |= obj->fence_reg;
107 I915_WRITE(FBC_CONTROL, fbc_ctl);
108
109 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
110 cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
111}
112
113static bool i8xx_fbc_enabled(struct drm_device *dev)
114{
115 struct drm_i915_private *dev_priv = dev->dev_private;
116
117 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
118}
119
120static void g4x_fbc_enable(struct drm_crtc *crtc)
121{
122 struct drm_device *dev = crtc->dev;
123 struct drm_i915_private *dev_priv = dev->dev_private;
124 struct drm_framebuffer *fb = crtc->primary->fb;
125 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
126 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
127 u32 dpfc_ctl;
128
129 dev_priv->fbc.enabled = true;
130
131 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
132 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
133 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
134 else
135 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
136 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
137
138 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
139
140 /* enable it... */
141 I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
142
143 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
144}
145
146static void g4x_fbc_disable(struct drm_device *dev)
147{
148 struct drm_i915_private *dev_priv = dev->dev_private;
149 u32 dpfc_ctl;
150
151 dev_priv->fbc.enabled = false;
152
153 /* Disable compression */
154 dpfc_ctl = I915_READ(DPFC_CONTROL);
155 if (dpfc_ctl & DPFC_CTL_EN) {
156 dpfc_ctl &= ~DPFC_CTL_EN;
157 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
158
159 DRM_DEBUG_KMS("disabled FBC\n");
160 }
161}
162
163static bool g4x_fbc_enabled(struct drm_device *dev)
164{
165 struct drm_i915_private *dev_priv = dev->dev_private;
166
167 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
168}
169
170static void snb_fbc_blit_update(struct drm_device *dev)
171{
172 struct drm_i915_private *dev_priv = dev->dev_private;
173 u32 blt_ecoskpd;
174
175 /* Make sure blitter notifies FBC of writes */
176
177 /* Blitter is part of Media powerwell on VLV. No impact of
178 * his param in other platforms for now */
179 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
180
181 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
182 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
183 GEN6_BLITTER_LOCK_SHIFT;
184 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
185 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
186 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
187 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
188 GEN6_BLITTER_LOCK_SHIFT);
189 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
190 POSTING_READ(GEN6_BLITTER_ECOSKPD);
191
192 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
193}
194
195static void ilk_fbc_enable(struct drm_crtc *crtc)
196{
197 struct drm_device *dev = crtc->dev;
198 struct drm_i915_private *dev_priv = dev->dev_private;
199 struct drm_framebuffer *fb = crtc->primary->fb;
200 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
201 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
202 u32 dpfc_ctl;
203
204 dev_priv->fbc.enabled = true;
205
206 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
207 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
208 dev_priv->fbc.threshold++;
209
210 switch (dev_priv->fbc.threshold) {
211 case 4:
212 case 3:
213 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
214 break;
215 case 2:
216 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
217 break;
218 case 1:
219 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
220 break;
221 }
222 dpfc_ctl |= DPFC_CTL_FENCE_EN;
223 if (IS_GEN5(dev))
224 dpfc_ctl |= obj->fence_reg;
225
226 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
227 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
228 /* enable it... */
229 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
230
231 if (IS_GEN6(dev)) {
232 I915_WRITE(SNB_DPFC_CTL_SA,
233 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
234 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
235 snb_fbc_blit_update(dev);
236 }
237
238 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
239}
240
241static void ilk_fbc_disable(struct drm_device *dev)
242{
243 struct drm_i915_private *dev_priv = dev->dev_private;
244 u32 dpfc_ctl;
245
246 dev_priv->fbc.enabled = false;
247
248 /* Disable compression */
249 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
250 if (dpfc_ctl & DPFC_CTL_EN) {
251 dpfc_ctl &= ~DPFC_CTL_EN;
252 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
253
254 DRM_DEBUG_KMS("disabled FBC\n");
255 }
256}
257
258static bool ilk_fbc_enabled(struct drm_device *dev)
259{
260 struct drm_i915_private *dev_priv = dev->dev_private;
261
262 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
263}
264
265static void gen7_fbc_enable(struct drm_crtc *crtc)
266{
267 struct drm_device *dev = crtc->dev;
268 struct drm_i915_private *dev_priv = dev->dev_private;
269 struct drm_framebuffer *fb = crtc->primary->fb;
270 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
271 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
272 u32 dpfc_ctl;
273
274 dev_priv->fbc.enabled = true;
275
276 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
277 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
278 dev_priv->fbc.threshold++;
279
280 switch (dev_priv->fbc.threshold) {
281 case 4:
282 case 3:
283 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
284 break;
285 case 2:
286 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
287 break;
288 case 1:
289 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
290 break;
291 }
292
293 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
294
295 if (dev_priv->fbc.false_color)
296 dpfc_ctl |= FBC_CTL_FALSE_COLOR;
297
298 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
299
300 if (IS_IVYBRIDGE(dev)) {
301 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
302 I915_WRITE(ILK_DISPLAY_CHICKEN1,
303 I915_READ(ILK_DISPLAY_CHICKEN1) |
304 ILK_FBCQ_DIS);
305 } else {
306 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
307 I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
308 I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
309 HSW_FBCQ_DIS);
310 }
311
312 I915_WRITE(SNB_DPFC_CTL_SA,
313 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
314 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
315
316 snb_fbc_blit_update(dev);
317
318 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
319}
320
321bool intel_fbc_enabled(struct drm_device *dev)
322{
323 struct drm_i915_private *dev_priv = dev->dev_private;
324
325 return dev_priv->fbc.enabled;
326}
327
328void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
329{
330 struct drm_i915_private *dev_priv = dev->dev_private;
331
332 if (!IS_GEN8(dev))
333 return;
334
335 if (!intel_fbc_enabled(dev))
336 return;
337
338 I915_WRITE(MSG_FBC_REND_STATE, value);
339}
340
341static void intel_fbc_work_fn(struct work_struct *__work)
342{
343 struct intel_fbc_work *work =
344 container_of(to_delayed_work(__work),
345 struct intel_fbc_work, work);
346 struct drm_device *dev = work->crtc->dev;
347 struct drm_i915_private *dev_priv = dev->dev_private;
348
349 mutex_lock(&dev->struct_mutex);
350 if (work == dev_priv->fbc.fbc_work) {
351 /* Double check that we haven't switched fb without cancelling
352 * the prior work.
353 */
354 if (work->crtc->primary->fb == work->fb) {
355 dev_priv->display.enable_fbc(work->crtc);
356
357 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
358 dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
359 dev_priv->fbc.y = work->crtc->y;
360 }
361
362 dev_priv->fbc.fbc_work = NULL;
363 }
364 mutex_unlock(&dev->struct_mutex);
365
366 kfree(work);
367}
368
369static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
370{
371 if (dev_priv->fbc.fbc_work == NULL)
372 return;
373
374 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
375
376 /* Synchronisation is provided by struct_mutex and checking of
377 * dev_priv->fbc.fbc_work, so we can perform the cancellation
378 * entirely asynchronously.
379 */
380 if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
381 /* tasklet was killed before being run, clean up */
382 kfree(dev_priv->fbc.fbc_work);
383
384 /* Mark the work as no longer wanted so that if it does
385 * wake-up (because the work was already running and waiting
386 * for our mutex), it will discover that is no longer
387 * necessary to run.
388 */
389 dev_priv->fbc.fbc_work = NULL;
390}
391
392static void intel_fbc_enable(struct drm_crtc *crtc)
393{
394 struct intel_fbc_work *work;
395 struct drm_device *dev = crtc->dev;
396 struct drm_i915_private *dev_priv = dev->dev_private;
397
398 if (!dev_priv->display.enable_fbc)
399 return;
400
401 intel_fbc_cancel_work(dev_priv);
402
403 work = kzalloc(sizeof(*work), GFP_KERNEL);
404 if (work == NULL) {
405 DRM_ERROR("Failed to allocate FBC work structure\n");
406 dev_priv->display.enable_fbc(crtc);
407 return;
408 }
409
410 work->crtc = crtc;
411 work->fb = crtc->primary->fb;
412 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
413
414 dev_priv->fbc.fbc_work = work;
415
416 /* Delay the actual enabling to let pageflipping cease and the
417 * display to settle before starting the compression. Note that
418 * this delay also serves a second purpose: it allows for a
419 * vblank to pass after disabling the FBC before we attempt
420 * to modify the control registers.
421 *
422 * A more complicated solution would involve tracking vblanks
423 * following the termination of the page-flipping sequence
424 * and indeed performing the enable as a co-routine and not
425 * waiting synchronously upon the vblank.
426 *
427 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
428 */
429 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
430}
431
432void intel_fbc_disable(struct drm_device *dev)
433{
434 struct drm_i915_private *dev_priv = dev->dev_private;
435
436 intel_fbc_cancel_work(dev_priv);
437
438 if (!dev_priv->display.disable_fbc)
439 return;
440
441 dev_priv->display.disable_fbc(dev);
442 dev_priv->fbc.plane = -1;
443}
444
445static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
446 enum no_fbc_reason reason)
447{
448 if (dev_priv->fbc.no_fbc_reason == reason)
449 return false;
450
451 dev_priv->fbc.no_fbc_reason = reason;
452 return true;
453}
454
455/**
456 * intel_fbc_update - enable/disable FBC as needed
457 * @dev: the drm_device
458 *
459 * Set up the framebuffer compression hardware at mode set time. We
460 * enable it if possible:
461 * - plane A only (on pre-965)
462 * - no pixel mulitply/line duplication
463 * - no alpha buffer discard
464 * - no dual wide
465 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
466 *
467 * We can't assume that any compression will take place (worst case),
468 * so the compressed buffer has to be the same size as the uncompressed
469 * one. It also must reside (along with the line length buffer) in
470 * stolen memory.
471 *
472 * We need to enable/disable FBC on a global basis.
473 */
474void intel_fbc_update(struct drm_device *dev)
475{
476 struct drm_i915_private *dev_priv = dev->dev_private;
477 struct drm_crtc *crtc = NULL, *tmp_crtc;
478 struct intel_crtc *intel_crtc;
479 struct drm_framebuffer *fb;
480 struct drm_i915_gem_object *obj;
481 const struct drm_display_mode *adjusted_mode;
482 unsigned int max_width, max_height;
483
484 if (!HAS_FBC(dev)) {
485 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
486 return;
487 }
488
489 if (!i915.powersave) {
490 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
491 DRM_DEBUG_KMS("fbc disabled per module param\n");
492 return;
493 }
494
495 /*
496 * If FBC is already on, we just have to verify that we can
497 * keep it that way...
498 * Need to disable if:
499 * - more than one pipe is active
500 * - changing FBC params (stride, fence, mode)
501 * - new fb is too large to fit in compressed buffer
502 * - going to an unsupported config (interlace, pixel multiply, etc.)
503 */
504 for_each_crtc(dev, tmp_crtc) {
505 if (intel_crtc_active(tmp_crtc) &&
506 to_intel_crtc(tmp_crtc)->primary_enabled) {
507 if (crtc) {
508 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
509 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
510 goto out_disable;
511 }
512 crtc = tmp_crtc;
513 }
514 }
515
516 if (!crtc || crtc->primary->fb == NULL) {
517 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
518 DRM_DEBUG_KMS("no output, disabling\n");
519 goto out_disable;
520 }
521
522 intel_crtc = to_intel_crtc(crtc);
523 fb = crtc->primary->fb;
524 obj = intel_fb_obj(fb);
525 adjusted_mode = &intel_crtc->config.adjusted_mode;
526
527 if (i915.enable_fbc < 0) {
528 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
529 DRM_DEBUG_KMS("disabled per chip default\n");
530 goto out_disable;
531 }
532 if (!i915.enable_fbc) {
533 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
534 DRM_DEBUG_KMS("fbc disabled per module param\n");
535 goto out_disable;
536 }
537 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
538 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
539 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
540 DRM_DEBUG_KMS("mode incompatible with compression, "
541 "disabling\n");
542 goto out_disable;
543 }
544
545 if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
546 max_width = 4096;
547 max_height = 4096;
548 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
549 max_width = 4096;
550 max_height = 2048;
551 } else {
552 max_width = 2048;
553 max_height = 1536;
554 }
555 if (intel_crtc->config.pipe_src_w > max_width ||
556 intel_crtc->config.pipe_src_h > max_height) {
557 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
558 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
559 goto out_disable;
560 }
561 if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
562 intel_crtc->plane != PLANE_A) {
563 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
564 DRM_DEBUG_KMS("plane not A, disabling compression\n");
565 goto out_disable;
566 }
567
568 /* The use of a CPU fence is mandatory in order to detect writes
569 * by the CPU to the scanout and trigger updates to the FBC.
570 */
571 if (obj->tiling_mode != I915_TILING_X ||
572 obj->fence_reg == I915_FENCE_REG_NONE) {
573 if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
574 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
575 goto out_disable;
576 }
577 if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
578 to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
579 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
580 DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
581 goto out_disable;
582 }
583
584 /* If the kernel debugger is active, always disable compression */
585 if (in_dbg_master())
586 goto out_disable;
587
588 if (i915_gem_stolen_setup_compression(dev, obj->base.size,
589 drm_format_plane_cpp(fb->pixel_format, 0))) {
590 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
591 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
592 goto out_disable;
593 }
594
595 /* If the scanout has not changed, don't modify the FBC settings.
596 * Note that we make the fundamental assumption that the fb->obj
597 * cannot be unpinned (and have its GTT offset and fence revoked)
598 * without first being decoupled from the scanout and FBC disabled.
599 */
600 if (dev_priv->fbc.plane == intel_crtc->plane &&
601 dev_priv->fbc.fb_id == fb->base.id &&
602 dev_priv->fbc.y == crtc->y)
603 return;
604
605 if (intel_fbc_enabled(dev)) {
606 /* We update FBC along two paths, after changing fb/crtc
607 * configuration (modeswitching) and after page-flipping
608 * finishes. For the latter, we know that not only did
609 * we disable the FBC at the start of the page-flip
610 * sequence, but also more than one vblank has passed.
611 *
612 * For the former case of modeswitching, it is possible
613 * to switch between two FBC valid configurations
614 * instantaneously so we do need to disable the FBC
615 * before we can modify its control registers. We also
616 * have to wait for the next vblank for that to take
617 * effect. However, since we delay enabling FBC we can
618 * assume that a vblank has passed since disabling and
619 * that we can safely alter the registers in the deferred
620 * callback.
621 *
622 * In the scenario that we go from a valid to invalid
623 * and then back to valid FBC configuration we have
624 * no strict enforcement that a vblank occurred since
625 * disabling the FBC. However, along all current pipe
626 * disabling paths we do need to wait for a vblank at
627 * some point. And we wait before enabling FBC anyway.
628 */
629 DRM_DEBUG_KMS("disabling active FBC for update\n");
630 intel_fbc_disable(dev);
631 }
632
633 intel_fbc_enable(crtc);
634 dev_priv->fbc.no_fbc_reason = FBC_OK;
635 return;
636
637out_disable:
638 /* Multiple disables should be harmless */
639 if (intel_fbc_enabled(dev)) {
640 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
641 intel_fbc_disable(dev);
642 }
643 i915_gem_stolen_cleanup_compression(dev);
644}
645
646void intel_fbc_init(struct drm_i915_private *dev_priv)
647{
648 if (!HAS_FBC(dev_priv)) {
649 dev_priv->fbc.enabled = false;
650 return;
651 }
652
653 if (INTEL_INFO(dev_priv)->gen >= 7) {
654 dev_priv->display.fbc_enabled = ilk_fbc_enabled;
655 dev_priv->display.enable_fbc = gen7_fbc_enable;
656 dev_priv->display.disable_fbc = ilk_fbc_disable;
657 } else if (INTEL_INFO(dev_priv)->gen >= 5) {
658 dev_priv->display.fbc_enabled = ilk_fbc_enabled;
659 dev_priv->display.enable_fbc = ilk_fbc_enable;
660 dev_priv->display.disable_fbc = ilk_fbc_disable;
661 } else if (IS_GM45(dev_priv)) {
662 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
663 dev_priv->display.enable_fbc = g4x_fbc_enable;
664 dev_priv->display.disable_fbc = g4x_fbc_disable;
665 } else {
666 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
667 dev_priv->display.enable_fbc = i8xx_fbc_enable;
668 dev_priv->display.disable_fbc = i8xx_fbc_disable;
669
670 /* This value was pulled out of someone's hat */
671 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
672 }
673
674 dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
675}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 78911e29439c..99865c046c8a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -52,17 +52,6 @@
 #define INTEL_RC6p_ENABLE			(1<<1)
 #define INTEL_RC6pp_ENABLE			(1<<2)
 
-/* FBC, or Frame Buffer Compression, is a technique employed to compress the
- * framebuffer contents in-memory, aiming at reducing the required bandwidth
- * during in-memory transfers and, therefore, reduce the power packet.
- *
- * The benefits of FBC are mostly visible with solid backgrounds and
- * variation-less patterns.
- *
- * FBC-related functionality can be enabled by the means of the
- * i915.i915_enable_fbc parameter
- */
-
 static void gen9_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -87,613 +76,6 @@ static void gen9_init_clock_gating(struct drm_device *dev)
87 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); 76 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
88} 77}
89 78
90static void i8xx_disable_fbc(struct drm_device *dev)
91{
92 struct drm_i915_private *dev_priv = dev->dev_private;
93 u32 fbc_ctl;
94
95 dev_priv->fbc.enabled = false;
96
97 /* Disable compression */
98 fbc_ctl = I915_READ(FBC_CONTROL);
99 if ((fbc_ctl & FBC_CTL_EN) == 0)
100 return;
101
102 fbc_ctl &= ~FBC_CTL_EN;
103 I915_WRITE(FBC_CONTROL, fbc_ctl);
104
105 /* Wait for compressing bit to clear */
106 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
107 DRM_DEBUG_KMS("FBC idle timed out\n");
108 return;
109 }
110
111 DRM_DEBUG_KMS("disabled FBC\n");
112}
113
114static void i8xx_enable_fbc(struct drm_crtc *crtc)
115{
116 struct drm_device *dev = crtc->dev;
117 struct drm_i915_private *dev_priv = dev->dev_private;
118 struct drm_framebuffer *fb = crtc->primary->fb;
119 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
120 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
121 int cfb_pitch;
122 int i;
123 u32 fbc_ctl;
124
125 dev_priv->fbc.enabled = true;
126
127 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
128 if (fb->pitches[0] < cfb_pitch)
129 cfb_pitch = fb->pitches[0];
130
131 /* FBC_CTL wants 32B or 64B units */
132 if (IS_GEN2(dev))
133 cfb_pitch = (cfb_pitch / 32) - 1;
134 else
135 cfb_pitch = (cfb_pitch / 64) - 1;
136
137 /* Clear old tags */
138 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
139 I915_WRITE(FBC_TAG + (i * 4), 0);
140
141 if (IS_GEN4(dev)) {
142 u32 fbc_ctl2;
143
144 /* Set it up... */
145 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
146 fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
147 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
148 I915_WRITE(FBC_FENCE_OFF, crtc->y);
149 }
150
151 /* enable it... */
152 fbc_ctl = I915_READ(FBC_CONTROL);
153 fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
154 fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
155 if (IS_I945GM(dev))
156 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
157 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
158 fbc_ctl |= obj->fence_reg;
159 I915_WRITE(FBC_CONTROL, fbc_ctl);
160
161 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
162 cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
163}
164
165static bool i8xx_fbc_enabled(struct drm_device *dev)
166{
167 struct drm_i915_private *dev_priv = dev->dev_private;
168
169 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
170}
171
172static void g4x_enable_fbc(struct drm_crtc *crtc)
173{
174 struct drm_device *dev = crtc->dev;
175 struct drm_i915_private *dev_priv = dev->dev_private;
176 struct drm_framebuffer *fb = crtc->primary->fb;
177 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
178 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
179 u32 dpfc_ctl;
180
181 dev_priv->fbc.enabled = true;
182
183 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
184 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
185 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
186 else
187 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
188 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
189
190 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
191
192 /* enable it... */
193 I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
194
195 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
196}
197
198static void g4x_disable_fbc(struct drm_device *dev)
199{
200 struct drm_i915_private *dev_priv = dev->dev_private;
201 u32 dpfc_ctl;
202
203 dev_priv->fbc.enabled = false;
204
205 /* Disable compression */
206 dpfc_ctl = I915_READ(DPFC_CONTROL);
207 if (dpfc_ctl & DPFC_CTL_EN) {
208 dpfc_ctl &= ~DPFC_CTL_EN;
209 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
210
211 DRM_DEBUG_KMS("disabled FBC\n");
212 }
213}
214
215static bool g4x_fbc_enabled(struct drm_device *dev)
216{
217 struct drm_i915_private *dev_priv = dev->dev_private;
218
219 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
220}
221
222static void sandybridge_blit_fbc_update(struct drm_device *dev)
223{
224 struct drm_i915_private *dev_priv = dev->dev_private;
225 u32 blt_ecoskpd;
226
227 /* Make sure blitter notifies FBC of writes */
228
229 /* Blitter is part of Media powerwell on VLV. No impact of
230 * his param in other platforms for now */
231 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
232
233 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
234 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
235 GEN6_BLITTER_LOCK_SHIFT;
236 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
237 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
238 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
239 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
240 GEN6_BLITTER_LOCK_SHIFT);
241 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
242 POSTING_READ(GEN6_BLITTER_ECOSKPD);
243
244 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
245}
246
247static void ironlake_enable_fbc(struct drm_crtc *crtc)
248{
249 struct drm_device *dev = crtc->dev;
250 struct drm_i915_private *dev_priv = dev->dev_private;
251 struct drm_framebuffer *fb = crtc->primary->fb;
252 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
253 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
254 u32 dpfc_ctl;
255
256 dev_priv->fbc.enabled = true;
257
258 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
259 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
260 dev_priv->fbc.threshold++;
261
262 switch (dev_priv->fbc.threshold) {
263 case 4:
264 case 3:
265 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
266 break;
267 case 2:
268 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
269 break;
270 case 1:
271 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
272 break;
273 }
274 dpfc_ctl |= DPFC_CTL_FENCE_EN;
275 if (IS_GEN5(dev))
276 dpfc_ctl |= obj->fence_reg;
277
278 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
279 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
280 /* enable it... */
281 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
282
283 if (IS_GEN6(dev)) {
284 I915_WRITE(SNB_DPFC_CTL_SA,
285 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
286 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
287 sandybridge_blit_fbc_update(dev);
288 }
289
290 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
291}
292
293static void ironlake_disable_fbc(struct drm_device *dev)
294{
295 struct drm_i915_private *dev_priv = dev->dev_private;
296 u32 dpfc_ctl;
297
298 dev_priv->fbc.enabled = false;
299
300 /* Disable compression */
301 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
302 if (dpfc_ctl & DPFC_CTL_EN) {
303 dpfc_ctl &= ~DPFC_CTL_EN;
304 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
305
306 DRM_DEBUG_KMS("disabled FBC\n");
307 }
308}
309
310static bool ironlake_fbc_enabled(struct drm_device *dev)
311{
312 struct drm_i915_private *dev_priv = dev->dev_private;
313
314 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
315}
316
317static void gen7_enable_fbc(struct drm_crtc *crtc)
318{
319 struct drm_device *dev = crtc->dev;
320 struct drm_i915_private *dev_priv = dev->dev_private;
321 struct drm_framebuffer *fb = crtc->primary->fb;
322 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
323 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
324 u32 dpfc_ctl;
325
326 dev_priv->fbc.enabled = true;
327
328 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
329 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
330 dev_priv->fbc.threshold++;
331
332 switch (dev_priv->fbc.threshold) {
333 case 4:
334 case 3:
335 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
336 break;
337 case 2:
338 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
339 break;
340 case 1:
341 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
342 break;
343 }
344
345 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
346
347 if (dev_priv->fbc.false_color)
348 dpfc_ctl |= FBC_CTL_FALSE_COLOR;
349
350 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
351
352 if (IS_IVYBRIDGE(dev)) {
353 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
354 I915_WRITE(ILK_DISPLAY_CHICKEN1,
355 I915_READ(ILK_DISPLAY_CHICKEN1) |
356 ILK_FBCQ_DIS);
357 } else {
358 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
359 I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
360 I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
361 HSW_FBCQ_DIS);
362 }
363
364 I915_WRITE(SNB_DPFC_CTL_SA,
365 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
366 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
367
368 sandybridge_blit_fbc_update(dev);
369
370 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
371}
372
373bool intel_fbc_enabled(struct drm_device *dev)
374{
375 struct drm_i915_private *dev_priv = dev->dev_private;
376
377 return dev_priv->fbc.enabled;
378}
379
380void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
381{
382 struct drm_i915_private *dev_priv = dev->dev_private;
383
384 if (!IS_GEN8(dev))
385 return;
386
387 if (!intel_fbc_enabled(dev))
388 return;
389
390 I915_WRITE(MSG_FBC_REND_STATE, value);
391}
392
393static void intel_fbc_work_fn(struct work_struct *__work)
394{
395 struct intel_fbc_work *work =
396 container_of(to_delayed_work(__work),
397 struct intel_fbc_work, work);
398 struct drm_device *dev = work->crtc->dev;
399 struct drm_i915_private *dev_priv = dev->dev_private;
400
401 mutex_lock(&dev->struct_mutex);
402 if (work == dev_priv->fbc.fbc_work) {
403 /* Double check that we haven't switched fb without cancelling
404 * the prior work.
405 */
406 if (work->crtc->primary->fb == work->fb) {
407 dev_priv->display.enable_fbc(work->crtc);
408
409 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
410 dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
411 dev_priv->fbc.y = work->crtc->y;
412 }
413
414 dev_priv->fbc.fbc_work = NULL;
415 }
416 mutex_unlock(&dev->struct_mutex);
417
418 kfree(work);
419}
420
421static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
422{
423 if (dev_priv->fbc.fbc_work == NULL)
424 return;
425
426 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
427
428 /* Synchronisation is provided by struct_mutex and checking of
429 * dev_priv->fbc.fbc_work, so we can perform the cancellation
430 * entirely asynchronously.
431 */
432 if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
433 /* tasklet was killed before being run, clean up */
434 kfree(dev_priv->fbc.fbc_work);
435
436 /* Mark the work as no longer wanted so that if it does
437 * wake-up (because the work was already running and waiting
438 * for our mutex), it will discover that is no longer
439 * necessary to run.
440 */
441 dev_priv->fbc.fbc_work = NULL;
442}
443
444static void intel_enable_fbc(struct drm_crtc *crtc)
445{
446 struct intel_fbc_work *work;
447 struct drm_device *dev = crtc->dev;
448 struct drm_i915_private *dev_priv = dev->dev_private;
449
450 if (!dev_priv->display.enable_fbc)
451 return;
452
453 intel_cancel_fbc_work(dev_priv);
454
455 work = kzalloc(sizeof(*work), GFP_KERNEL);
456 if (work == NULL) {
457 DRM_ERROR("Failed to allocate FBC work structure\n");
458 dev_priv->display.enable_fbc(crtc);
459 return;
460 }
461
462 work->crtc = crtc;
463 work->fb = crtc->primary->fb;
464 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
465
466 dev_priv->fbc.fbc_work = work;
467
468 /* Delay the actual enabling to let pageflipping cease and the
469 * display to settle before starting the compression. Note that
470 * this delay also serves a second purpose: it allows for a
471 * vblank to pass after disabling the FBC before we attempt
472 * to modify the control registers.
473 *
474 * A more complicated solution would involve tracking vblanks
475 * following the termination of the page-flipping sequence
476 * and indeed performing the enable as a co-routine and not
477 * waiting synchronously upon the vblank.
478 *
479 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
480 */
481 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
482}
483
484void intel_disable_fbc(struct drm_device *dev)
485{
486 struct drm_i915_private *dev_priv = dev->dev_private;
487
488 intel_cancel_fbc_work(dev_priv);
489
490 if (!dev_priv->display.disable_fbc)
491 return;
492
493 dev_priv->display.disable_fbc(dev);
494 dev_priv->fbc.plane = -1;
495}
496
497static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
498 enum no_fbc_reason reason)
499{
500 if (dev_priv->fbc.no_fbc_reason == reason)
501 return false;
502
503 dev_priv->fbc.no_fbc_reason = reason;
504 return true;
505}
506
507/**
508 * intel_update_fbc - enable/disable FBC as needed
509 * @dev: the drm_device
510 *
511 * Set up the framebuffer compression hardware at mode set time. We
512 * enable it if possible:
513 * - plane A only (on pre-965)
514 * - no pixel mulitply/line duplication
515 * - no alpha buffer discard
516 * - no dual wide
517 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
518 *
519 * We can't assume that any compression will take place (worst case),
520 * so the compressed buffer has to be the same size as the uncompressed
521 * one. It also must reside (along with the line length buffer) in
522 * stolen memory.
523 *
524 * We need to enable/disable FBC on a global basis.
525 */
526void intel_update_fbc(struct drm_device *dev)
527{
528 struct drm_i915_private *dev_priv = dev->dev_private;
529 struct drm_crtc *crtc = NULL, *tmp_crtc;
530 struct intel_crtc *intel_crtc;
531 struct drm_framebuffer *fb;
532 struct drm_i915_gem_object *obj;
533 const struct drm_display_mode *adjusted_mode;
534 unsigned int max_width, max_height;
535
536 if (!HAS_FBC(dev)) {
537 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
538 return;
539 }
540
541 if (!i915.powersave) {
542 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
543 DRM_DEBUG_KMS("fbc disabled per module param\n");
544 return;
545 }
546
547 /*
548 * If FBC is already on, we just have to verify that we can
549 * keep it that way...
550 * Need to disable if:
551 * - more than one pipe is active
552 * - changing FBC params (stride, fence, mode)
553 * - new fb is too large to fit in compressed buffer
554 * - going to an unsupported config (interlace, pixel multiply, etc.)
555 */
556 for_each_crtc(dev, tmp_crtc) {
557 if (intel_crtc_active(tmp_crtc) &&
558 to_intel_crtc(tmp_crtc)->primary_enabled) {
559 if (crtc) {
560 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
561 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
562 goto out_disable;
563 }
564 crtc = tmp_crtc;
565 }
566 }
567
568 if (!crtc || crtc->primary->fb == NULL) {
569 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
570 DRM_DEBUG_KMS("no output, disabling\n");
571 goto out_disable;
572 }
573
574 intel_crtc = to_intel_crtc(crtc);
575 fb = crtc->primary->fb;
576 obj = intel_fb_obj(fb);
577 adjusted_mode = &intel_crtc->config.adjusted_mode;
578
579 if (i915.enable_fbc < 0) {
580 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
581 DRM_DEBUG_KMS("disabled per chip default\n");
582 goto out_disable;
583 }
584 if (!i915.enable_fbc) {
585 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
586 DRM_DEBUG_KMS("fbc disabled per module param\n");
587 goto out_disable;
588 }
589 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
590 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
591 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
592 DRM_DEBUG_KMS("mode incompatible with compression, "
593 "disabling\n");
594 goto out_disable;
595 }
596
597 if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
598 max_width = 4096;
599 max_height = 4096;
600 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
601 max_width = 4096;
602 max_height = 2048;
603 } else {
604 max_width = 2048;
605 max_height = 1536;
606 }
607 if (intel_crtc->config.pipe_src_w > max_width ||
608 intel_crtc->config.pipe_src_h > max_height) {
609 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
610 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
611 goto out_disable;
612 }
613 if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
614 intel_crtc->plane != PLANE_A) {
615 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
616 DRM_DEBUG_KMS("plane not A, disabling compression\n");
617 goto out_disable;
618 }
619
620 /* The use of a CPU fence is mandatory in order to detect writes
621 * by the CPU to the scanout and trigger updates to the FBC.
622 */
623 if (obj->tiling_mode != I915_TILING_X ||
624 obj->fence_reg == I915_FENCE_REG_NONE) {
625 if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
626 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
627 goto out_disable;
628 }
629 if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
630 to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
631 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
632 DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
633 goto out_disable;
634 }
635
636 /* If the kernel debugger is active, always disable compression */
637 if (in_dbg_master())
638 goto out_disable;
639
640 if (i915_gem_stolen_setup_compression(dev, obj->base.size,
641 drm_format_plane_cpp(fb->pixel_format, 0))) {
642 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
643 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
644 goto out_disable;
645 }
646
647 /* If the scanout has not changed, don't modify the FBC settings.
648 * Note that we make the fundamental assumption that the fb->obj
649 * cannot be unpinned (and have its GTT offset and fence revoked)
650 * without first being decoupled from the scanout and FBC disabled.
651 */
652 if (dev_priv->fbc.plane == intel_crtc->plane &&
653 dev_priv->fbc.fb_id == fb->base.id &&
654 dev_priv->fbc.y == crtc->y)
655 return;
656
657 if (intel_fbc_enabled(dev)) {
658 /* We update FBC along two paths, after changing fb/crtc
659 * configuration (modeswitching) and after page-flipping
660 * finishes. For the latter, we know that not only did
661 * we disable the FBC at the start of the page-flip
662 * sequence, but also more than one vblank has passed.
663 *
664 * For the former case of modeswitching, it is possible
665 * to switch between two FBC valid configurations
666 * instantaneously so we do need to disable the FBC
667 * before we can modify its control registers. We also
668 * have to wait for the next vblank for that to take
669 * effect. However, since we delay enabling FBC we can
670 * assume that a vblank has passed since disabling and
671 * that we can safely alter the registers in the deferred
672 * callback.
673 *
674 * In the scenario that we go from a valid to invalid
675 * and then back to valid FBC configuration we have
676 * no strict enforcement that a vblank occurred since
677 * disabling the FBC. However, along all current pipe
678 * disabling paths we do need to wait for a vblank at
679 * some point. And we wait before enabling FBC anyway.
680 */
681 DRM_DEBUG_KMS("disabling active FBC for update\n");
682 intel_disable_fbc(dev);
683 }
684
685 intel_enable_fbc(crtc);
686 dev_priv->fbc.no_fbc_reason = FBC_OK;
687 return;
688
689out_disable:
690 /* Multiple disables should be harmless */
691 if (intel_fbc_enabled(dev)) {
692 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
693 intel_disable_fbc(dev);
694 }
695 i915_gem_stolen_cleanup_compression(dev);
696}
697 79
698static void i915_pineview_get_mem_freq(struct drm_device *dev) 80static void i915_pineview_get_mem_freq(struct drm_device *dev)
699{ 81{
@@ -6922,43 +6304,12 @@ void intel_suspend_hw(struct drm_device *dev)
 		lpt_suspend_hw(dev);
 }
 
-static void intel_init_fbc(struct drm_i915_private *dev_priv)
-{
-	if (!HAS_FBC(dev_priv)) {
-		dev_priv->fbc.enabled = false;
-		return;
-	}
-
-	if (INTEL_INFO(dev_priv)->gen >= 7) {
-		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-		dev_priv->display.enable_fbc = gen7_enable_fbc;
-		dev_priv->display.disable_fbc = ironlake_disable_fbc;
-	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
-		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-		dev_priv->display.enable_fbc = ironlake_enable_fbc;
-		dev_priv->display.disable_fbc = ironlake_disable_fbc;
-	} else if (IS_GM45(dev_priv)) {
-		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-		dev_priv->display.enable_fbc = g4x_enable_fbc;
-		dev_priv->display.disable_fbc = g4x_disable_fbc;
-	} else {
-		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-		dev_priv->display.enable_fbc = i8xx_enable_fbc;
-		dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
-		/* This value was pulled out of someone's hat */
-		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
-	}
-
-	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
-}
-
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	intel_init_fbc(dev_priv);
+	intel_fbc_init(dev_priv);
 
 	/* For cxsr */
 	if (IS_PINEVIEW(dev))
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index bc5834bba2ae..c18e57d36c2c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1004,7 +1004,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
 	hsw_enable_ips(intel_crtc);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
+	intel_fbc_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -1017,7 +1017,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
 
 	mutex_lock(&dev->struct_mutex);
 	if (dev_priv->fbc.plane == intel_crtc->plane)
-		intel_disable_fbc(dev);
+		intel_fbc_disable(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	/*