 Documentation/DocBook/drm.tmpl                   |  16
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c   | 294
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c     |  51
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h     |  59
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c  |   4
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h  |   3
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c |  41
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c  | 640
 drivers/gpu/drm/drm_atomic.c                     | 100
 drivers/gpu/drm/drm_atomic_helper.c              | 205
 drivers/gpu/drm/drm_crtc.c                       |  39
 drivers/gpu/drm/drm_crtc_helper.c                |   1
 drivers/gpu/drm/drm_ioctl.c                      |   3
 drivers/gpu/drm/drm_irq.c                        |  58
 drivers/gpu/drm/drm_plane_helper.c               |   5
 drivers/gpu/drm/i2c/tda998x_drv.c                | 101
 drivers/gpu/drm/i915/Makefile                    |   6
 drivers/gpu/drm/i915/i915_cmd_parser.c           |  74
 drivers/gpu/drm/i915/i915_debugfs.c              | 137
 drivers/gpu/drm/i915/i915_dma.c                  | 187
 drivers/gpu/drm/i915/i915_drv.c                  | 183
 drivers/gpu/drm/i915/i915_drv.h                  | 187
 drivers/gpu/drm/i915/i915_gem.c                  |  39
 drivers/gpu/drm/i915/i915_gem_context.c          |  12
 drivers/gpu/drm/i915/i915_gem_execbuffer.c       |  97
 drivers/gpu/drm/i915/i915_gem_gtt.c              | 400
 drivers/gpu/drm/i915/i915_gem_gtt.h              |  35
 drivers/gpu/drm/i915/i915_gem_stolen.c           |   8
 drivers/gpu/drm/i915/i915_gpu_error.c            |   5
 drivers/gpu/drm/i915/i915_irq.c                  |  95
 drivers/gpu/drm/i915/i915_reg.h                  | 110
 drivers/gpu/drm/i915/i915_suspend.c              | 215
 drivers/gpu/drm/i915/i915_sysfs.c                |  74
 drivers/gpu/drm/i915/i915_trace.h                |   8
 drivers/gpu/drm/i915/i915_ums.c                  | 552
 drivers/gpu/drm/i915/i915_vgpu.c                 | 264
 drivers/gpu/drm/i915/i915_vgpu.h                 |  91
 drivers/gpu/drm/i915/intel_atomic.c              |   4
 drivers/gpu/drm/i915/intel_bios.c                |   7
 drivers/gpu/drm/i915/intel_bios.h                |   1
 drivers/gpu/drm/i915/intel_ddi.c                 |  46
 drivers/gpu/drm/i915/intel_display.c             | 588
 drivers/gpu/drm/i915/intel_dp.c                  | 139
 drivers/gpu/drm/i915/intel_drv.h                 |  49
 drivers/gpu/drm/i915/intel_dsi.c                 |   2
 drivers/gpu/drm/i915/intel_dsi_cmd.h             |  39
 drivers/gpu/drm/i915/intel_fbc.c                 |  93
 drivers/gpu/drm/i915/intel_fbdev.c               |   3
 drivers/gpu/drm/i915/intel_lrc.c                 | 208
 drivers/gpu/drm/i915/intel_lrc.h                 |  12
 drivers/gpu/drm/i915/intel_lvds.c                |   2
 drivers/gpu/drm/i915/intel_opregion.c            |   6
 drivers/gpu/drm/i915/intel_overlay.c             |   2
 drivers/gpu/drm/i915/intel_pm.c                  | 233
 drivers/gpu/drm/i915/intel_ringbuffer.c          | 304
 drivers/gpu/drm/i915/intel_ringbuffer.h          |  12
 drivers/gpu/drm/i915/intel_runtime_pm.c          | 220
 drivers/gpu/drm/i915/intel_sprite.c              |  53
 drivers/gpu/drm/i915/intel_uncore.c              |  59
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c        |   6
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c        |   6
 drivers/gpu/drm/msm/msm_atomic.c                 |   4
 drivers/gpu/drm/rcar-du/rcar_du_crtc.c           | 400
 drivers/gpu/drm/rcar-du/rcar_du_crtc.h           |   8
 drivers/gpu/drm/rcar-du/rcar_du_drv.c            |  17
 drivers/gpu/drm/rcar-du/rcar_du_drv.h            |  16
 drivers/gpu/drm/rcar-du/rcar_du_encoder.c        |  71
 drivers/gpu/drm/rcar-du/rcar_du_group.h          |   5
 drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c        |   9
 drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c        |  65
 drivers/gpu/drm/rcar-du/rcar_du_kms.c            | 333
 drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c        |   9
 drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c        |  18
 drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h        |   8
 drivers/gpu/drm/rcar-du/rcar_du_plane.c          | 419
 drivers/gpu/drm/rcar-du/rcar_du_plane.h          |  69
 drivers/gpu/drm/rcar-du/rcar_du_vgacon.c         |   9
 drivers/gpu/drm/tegra/dc.c                       |   6
 drivers/gpu/drm/tegra/drm.c                      |   4
 include/drm/drmP.h                               |  10
 include/drm/drm_atomic_helper.h                  |   6
 include/drm/drm_crtc.h                           |   4
 include/drm/drm_crtc_helper.h                    |   3
 include/drm/drm_dp_helper.h                      |   8
 include/drm/drm_modes.h                          |   2
 include/drm/drm_plane_helper.h                   |   6
 include/drm/i915_pciids.h                        |  28
 include/uapi/drm/drm.h                           |   1
 include/uapi/drm/drm_fourcc.h                    |  78
 include/uapi/drm/drm_mode.h                      |   9
90 files changed, 4620 insertions, 3498 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 03f1985a4bd1..7a45775518f6 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3979,6 +3979,11 @@ int num_ioctls;</synopsis>
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
     </sect2>
+    <sect2>
+      <title>Intel GVT-g Guest Support(vGPU)</title>
+!Pdrivers/gpu/drm/i915/i915_vgpu.c Intel GVT-g guest support
+!Idrivers/gpu/drm/i915/i915_vgpu.c
+    </sect2>
   </sect1>
   <sect1>
     <title>Display Hardware Handling</title>
@@ -4048,6 +4053,17 @@ int num_ioctls;</synopsis>
 !Idrivers/gpu/drm/i915/intel_fbc.c
     </sect2>
     <sect2>
+      <title>Display Refresh Rate Switching (DRRS)</title>
+!Pdrivers/gpu/drm/i915/intel_dp.c Display Refresh Rate Switching (DRRS)
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_set_drrs_state
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_enable
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_disable
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_invalidate
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_flush
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_drrs_init
+
+    </sect2>
+    <sect2>
       <title>DPIO</title>
 !Pdrivers/gpu/drm/i915/i915_reg.h DPIO
       <table id="dpiox2">
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index b3e3068c6ec0..d55c0c232e1d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -21,6 +21,7 @@
 #include <linux/clk.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
@@ -37,14 +38,14 @@
  * @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
  * @event: pointer to the current page flip event
  * @id: CRTC id (returned by drm_crtc_index)
- * @dpms: DPMS mode
+ * @enabled: CRTC state
  */
 struct atmel_hlcdc_crtc {
 	struct drm_crtc base;
 	struct atmel_hlcdc_dc *dc;
 	struct drm_pending_vblank_event *event;
 	int id;
-	int dpms;
+	bool enabled;
 };
 
 static inline struct atmel_hlcdc_crtc *
@@ -53,86 +54,17 @@ drm_crtc_to_atmel_hlcdc_crtc(struct drm_crtc *crtc)
 	return container_of(crtc, struct atmel_hlcdc_crtc, base);
 }
 
-static void atmel_hlcdc_crtc_dpms(struct drm_crtc *c, int mode)
-{
-	struct drm_device *dev = c->dev;
-	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
-	struct regmap *regmap = crtc->dc->hlcdc->regmap;
-	unsigned int status;
-
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (crtc->dpms == mode)
-		return;
-
-	pm_runtime_get_sync(dev->dev);
-
-	if (mode != DRM_MODE_DPMS_ON) {
-		regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
-		while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
-		       (status & ATMEL_HLCDC_DISP))
-			cpu_relax();
-
-		regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
-		while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
-		       (status & ATMEL_HLCDC_SYNC))
-			cpu_relax();
-
-		regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
-		while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
-		       (status & ATMEL_HLCDC_PIXEL_CLK))
-			cpu_relax();
-
-		clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
-
-		pm_runtime_allow(dev->dev);
-	} else {
-		pm_runtime_forbid(dev->dev);
-
-		clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
-
-		regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
-		while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
-		       !(status & ATMEL_HLCDC_PIXEL_CLK))
-			cpu_relax();
-
-
-		regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
-		while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
-		       !(status & ATMEL_HLCDC_SYNC))
-			cpu_relax();
-
-		regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
-		while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
-		       !(status & ATMEL_HLCDC_DISP))
-			cpu_relax();
-	}
-
-	pm_runtime_put_sync(dev->dev);
-
-	crtc->dpms = mode;
-}
-
-static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
-				     struct drm_display_mode *mode,
-				     struct drm_display_mode *adj,
-				     int x, int y,
-				     struct drm_framebuffer *old_fb)
+static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
 {
 	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
 	struct regmap *regmap = crtc->dc->hlcdc->regmap;
-	struct drm_plane *plane = c->primary;
-	struct drm_framebuffer *fb;
+	struct drm_display_mode *adj = &c->state->adjusted_mode;
 	unsigned long mode_rate;
 	struct videomode vm;
 	unsigned long prate;
 	unsigned int cfg;
 	int div;
 
-	if (atmel_hlcdc_dc_mode_valid(crtc->dc, adj) != MODE_OK)
-		return -EINVAL;
-
 	vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
 	vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
 	vm.vsync_len = adj->crtc_vsync_end - adj->crtc_vsync_start;
@@ -156,7 +88,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
 	cfg = 0;
 
 	prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
-	mode_rate = mode->crtc_clock * 1000;
+	mode_rate = adj->crtc_clock * 1000;
 	if ((prate / 2) < mode_rate) {
 		prate *= 2;
 		cfg |= ATMEL_HLCDC_CLKSEL;
@@ -174,10 +106,10 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
 
 	cfg = 0;
 
-	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+	if (adj->flags & DRM_MODE_FLAG_NVSYNC)
 		cfg |= ATMEL_HLCDC_VSPOL;
 
-	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+	if (adj->flags & DRM_MODE_FLAG_NHSYNC)
 		cfg |= ATMEL_HLCDC_HSPOL;
 
 	regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
@@ -187,77 +119,134 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
 			    ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
 			    ATMEL_HLCDC_GUARDTIME_MASK,
 			    cfg);
-
-	fb = plane->fb;
-	plane->fb = old_fb;
-
-	return atmel_hlcdc_plane_update_with_mode(plane, c, fb, 0, 0,
-						  adj->hdisplay, adj->vdisplay,
-						  x << 16, y << 16,
-						  adj->hdisplay << 16,
-						  adj->vdisplay << 16,
-						  adj);
 }
 
-int atmel_hlcdc_crtc_mode_set_base(struct drm_crtc *c, int x, int y,
-				   struct drm_framebuffer *old_fb)
+static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
 {
-	struct drm_plane *plane = c->primary;
-	struct drm_framebuffer *fb = plane->fb;
-	struct drm_display_mode *mode = &c->hwmode;
-
-	plane->fb = old_fb;
-
-	return plane->funcs->update_plane(plane, c, fb,
-					  0, 0,
-					  mode->hdisplay,
-					  mode->vdisplay,
-					  x << 16, y << 16,
-					  mode->hdisplay << 16,
-					  mode->vdisplay << 16);
+	return true;
 }
 
-static void atmel_hlcdc_crtc_prepare(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
 {
-	atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	struct drm_device *dev = c->dev;
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+	struct regmap *regmap = crtc->dc->hlcdc->regmap;
+	unsigned int status;
+
+	if (!crtc->enabled)
+		return;
+
+	drm_crtc_vblank_off(c);
+
+	pm_runtime_get_sync(dev->dev);
+
+	regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
+	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+	       (status & ATMEL_HLCDC_DISP))
+		cpu_relax();
+
+	regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
+	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+	       (status & ATMEL_HLCDC_SYNC))
+		cpu_relax();
+
+	regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
+	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+	       (status & ATMEL_HLCDC_PIXEL_CLK))
+		cpu_relax();
+
+	clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
+	pinctrl_pm_select_sleep_state(dev->dev);
+
+	pm_runtime_allow(dev->dev);
+
+	pm_runtime_put_sync(dev->dev);
+
+	crtc->enabled = false;
 }
 
-static void atmel_hlcdc_crtc_commit(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_enable(struct drm_crtc *c)
 {
-	atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	struct drm_device *dev = c->dev;
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+	struct regmap *regmap = crtc->dc->hlcdc->regmap;
+	unsigned int status;
+
+	if (crtc->enabled)
+		return;
+
+	pm_runtime_get_sync(dev->dev);
+
+	pm_runtime_forbid(dev->dev);
+
+	pinctrl_pm_select_default_state(dev->dev);
+	clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
+
+	regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
+	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+	       !(status & ATMEL_HLCDC_PIXEL_CLK))
+		cpu_relax();
+
+
+	regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
+	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+	       !(status & ATMEL_HLCDC_SYNC))
+		cpu_relax();
+
+	regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
+	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
+	       !(status & ATMEL_HLCDC_DISP))
+		cpu_relax();
+
+	pm_runtime_put_sync(dev->dev);
+
+	drm_crtc_vblank_on(c);
+
+	crtc->enabled = true;
 }
 
-static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
-					const struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
+static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
+					 struct drm_crtc_state *s)
 {
-	return true;
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+	if (atmel_hlcdc_dc_mode_valid(crtc->dc, &s->adjusted_mode) != MODE_OK)
+		return -EINVAL;
+
+	return atmel_hlcdc_plane_prepare_disc_area(s);
 }
 
-static void atmel_hlcdc_crtc_disable(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
 {
-	struct drm_plane *plane;
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
 
-	atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-	crtc->primary->funcs->disable_plane(crtc->primary);
+	if (c->state->event) {
+		c->state->event->pipe = drm_crtc_index(c);
 
-	drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
-		if (plane->crtc != crtc)
-			continue;
+		WARN_ON(drm_crtc_vblank_get(c) != 0);
 
-		plane->funcs->disable_plane(crtc->primary);
-		plane->crtc = NULL;
+		crtc->event = c->state->event;
+		c->state->event = NULL;
 	}
 }
 
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+{
+	/* TODO: write common plane control register if available */
+}
+
 static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
 	.mode_fixup = atmel_hlcdc_crtc_mode_fixup,
-	.dpms = atmel_hlcdc_crtc_dpms,
-	.mode_set = atmel_hlcdc_crtc_mode_set,
-	.mode_set_base = atmel_hlcdc_crtc_mode_set_base,
-	.prepare = atmel_hlcdc_crtc_prepare,
-	.commit = atmel_hlcdc_crtc_commit,
+	.mode_set = drm_helper_crtc_mode_set,
+	.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
+	.mode_set_base = drm_helper_crtc_mode_set_base,
 	.disable = atmel_hlcdc_crtc_disable,
+	.enable = atmel_hlcdc_crtc_enable,
+	.atomic_check = atmel_hlcdc_crtc_atomic_check,
+	.atomic_begin = atmel_hlcdc_crtc_atomic_begin,
+	.atomic_flush = atmel_hlcdc_crtc_atomic_flush,
 };
 
 static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
@@ -306,61 +295,13 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
 	atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }
 
-static int atmel_hlcdc_crtc_page_flip(struct drm_crtc *c,
-				      struct drm_framebuffer *fb,
-				      struct drm_pending_vblank_event *event,
-				      uint32_t page_flip_flags)
-{
-	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
-	struct atmel_hlcdc_plane_update_req req;
-	struct drm_plane *plane = c->primary;
-	struct drm_device *dev = c->dev;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (crtc->event)
-		ret = -EBUSY;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	if (ret)
-		return ret;
-
-	memset(&req, 0, sizeof(req));
-	req.crtc_x = 0;
-	req.crtc_y = 0;
-	req.crtc_h = c->mode.crtc_vdisplay;
-	req.crtc_w = c->mode.crtc_hdisplay;
-	req.src_x = c->x << 16;
-	req.src_y = c->y << 16;
-	req.src_w = req.crtc_w << 16;
-	req.src_h = req.crtc_h << 16;
-	req.fb = fb;
-
-	ret = atmel_hlcdc_plane_prepare_update_req(plane, &req, &c->hwmode);
-	if (ret)
-		return ret;
-
-	if (event) {
-		drm_vblank_get(c->dev, crtc->id);
-		spin_lock_irqsave(&dev->event_lock, flags);
-		crtc->event = event;
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-	}
-
-	ret = atmel_hlcdc_plane_apply_update_req(plane, &req);
-	if (ret)
-		crtc->event = NULL;
-	else
-		plane->fb = fb;
-
-	return ret;
-}
-
 static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
-	.page_flip = atmel_hlcdc_crtc_page_flip,
-	.set_config = drm_crtc_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = atmel_hlcdc_crtc_destroy,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 int atmel_hlcdc_crtc_create(struct drm_device *dev)
@@ -375,7 +316,6 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
 	if (!crtc)
 		return -ENOMEM;
 
-	crtc->dpms = DRM_MODE_DPMS_OFF;
 	crtc->dc = dc;
 
 	ret = drm_crtc_init_with_planes(dev, &crtc->base,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c1cb17493e0d..c4bb1f9f95c6 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -222,6 +222,8 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = atmel_hlcdc_fb_create,
 	.output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
@@ -317,6 +319,8 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 		goto err_periph_clk_disable;
 	}
 
+	drm_mode_config_reset(dev);
+
 	ret = drm_vblank_init(dev, 1);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to initialize vblank\n");
@@ -555,6 +559,52 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct drm_crtc *crtc;
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	drm_modeset_lock_all(drm_dev);
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+		if (crtc->enabled) {
+			crtc_funcs->disable(crtc);
+			/* save enable state for resume */
+			crtc->enabled = true;
+		}
+	}
+	drm_modeset_unlock_all(drm_dev);
+	return 0;
+}
+
+static int atmel_hlcdc_dc_drm_resume(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct drm_crtc *crtc;
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	drm_modeset_lock_all(drm_dev);
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+		if (crtc->enabled) {
+			crtc->enabled = false;
+			crtc_funcs->enable(crtc);
+		}
+	}
+	drm_modeset_unlock_all(drm_dev);
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
+		atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume);
+
 static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
 	{ .compatible = "atmel,hlcdc-display-controller" },
 	{ },
@@ -565,6 +615,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = {
 	.remove = atmel_hlcdc_dc_drm_remove,
 	.driver = {
 		.name = "atmel-hlcdc-display-controller",
+		.pm = &atmel_hlcdc_dc_drm_pm_ops,
 		.of_match_table = atmel_hlcdc_dc_of_match,
 	},
 };
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 7bc96af3397a..1ea9c2ccd8a7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -26,11 +26,14 @@
 #include <linux/irqdomain.h>
 #include <linux/pwm.h>
 
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_plane_helper.h>
 #include <drm/drmP.h>
 
 #include "atmel_hlcdc_layer.h"
@@ -69,7 +72,6 @@ struct atmel_hlcdc_dc_desc {
  */
 struct atmel_hlcdc_plane_properties {
 	struct drm_property *alpha;
-	struct drm_property *rotation;
 };
 
 /**
@@ -84,7 +86,6 @@ struct atmel_hlcdc_plane {
 	struct drm_plane base;
 	struct atmel_hlcdc_layer layer;
 	struct atmel_hlcdc_plane_properties *properties;
-	unsigned int rotation;
 };
 
 static inline struct atmel_hlcdc_plane *
@@ -100,43 +101,6 @@ atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *l)
 }
 
 /**
- * Atmel HLCDC Plane update request structure.
- *
- * @crtc_x: x position of the plane relative to the CRTC
- * @crtc_y: y position of the plane relative to the CRTC
- * @crtc_w: visible width of the plane
- * @crtc_h: visible height of the plane
- * @src_x: x buffer position
- * @src_y: y buffer position
- * @src_w: buffer width
- * @src_h: buffer height
- * @fb: framebuffer object object
- * @bpp: bytes per pixel deduced from pixel_format
- * @offsets: offsets to apply to the GEM buffers
- * @xstride: value to add to the pixel pointer between each line
- * @pstride: value to add to the pixel pointer between each pixel
- * @nplanes: number of planes (deduced from pixel_format)
- */
-struct atmel_hlcdc_plane_update_req {
-	int crtc_x;
-	int crtc_y;
-	unsigned int crtc_w;
-	unsigned int crtc_h;
-	uint32_t src_x;
-	uint32_t src_y;
-	uint32_t src_w;
-	uint32_t src_h;
-	struct drm_framebuffer *fb;
-
-	/* These fields are private and should not be touched */
-	int bpp[ATMEL_HLCDC_MAX_PLANES];
-	unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
-	int xstride[ATMEL_HLCDC_MAX_PLANES];
-	int pstride[ATMEL_HLCDC_MAX_PLANES];
-	int nplanes;
-};
-
-/**
  * Atmel HLCDC Planes.
  *
@@ -184,22 +148,7 @@ int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
 struct atmel_hlcdc_planes *
 atmel_hlcdc_create_planes(struct drm_device *dev);
 
-int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
-					 struct atmel_hlcdc_plane_update_req *req,
-					 const struct drm_display_mode *mode);
-
-int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
-				       struct atmel_hlcdc_plane_update_req *req);
-
-int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
-				       struct drm_crtc *crtc,
-				       struct drm_framebuffer *fb,
-				       int crtc_x, int crtc_y,
-				       unsigned int crtc_w,
-				       unsigned int crtc_h,
-				       uint32_t src_x, uint32_t src_y,
-				       uint32_t src_w, uint32_t src_h,
-				       const struct drm_display_mode *mode);
+int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state);
 
 void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index e79bd9ba474b..377e43cea9dd 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -298,7 +298,7 @@ void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
 	spin_unlock_irqrestore(&layer->lock, flags);
 }
 
-int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
+void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
 {
 	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
 	struct atmel_hlcdc_layer_update *upd = &layer->update;
@@ -341,8 +341,6 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
 	dma->status = ATMEL_HLCDC_LAYER_DISABLED;
 
 	spin_unlock_irqrestore(&layer->lock, flags);
-
-	return 0;
 }
 
 int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
index 27e56c0862ec..9beabc940bce 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h
@@ -120,6 +120,7 @@
 #define ATMEL_HLCDC_LAYER_DISCEN	BIT(11)
 #define ATMEL_HLCDC_LAYER_GA_SHIFT	16
 #define ATMEL_HLCDC_LAYER_GA_MASK	GENMASK(23, ATMEL_HLCDC_LAYER_GA_SHIFT)
+#define ATMEL_HLCDC_LAYER_GA(x)		((x) << ATMEL_HLCDC_LAYER_GA_SHIFT)
 
 #define ATMEL_HLCDC_LAYER_CSC_CFG(p, o)	ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.csc + o)
 
@@ -376,7 +377,7 @@ int atmel_hlcdc_layer_init(struct drm_device *dev,
 void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
 			       struct atmel_hlcdc_layer *layer);
 
-int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer);
+void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer);
 
 int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer);
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index c402192362c5..9c4513005310 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -86,25 +86,22 @@ atmel_hlcdc_rgb_output_to_panel(struct atmel_hlcdc_rgb_output *output)
 	return container_of(output, struct atmel_hlcdc_panel, base);
 }
 
-static void atmel_hlcdc_panel_encoder_dpms(struct drm_encoder *encoder,
-					   int mode)
+static void atmel_hlcdc_panel_encoder_enable(struct drm_encoder *encoder)
 {
 	struct atmel_hlcdc_rgb_output *rgb =
 		drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
 	struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
 
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == rgb->dpms)
-		return;
+	drm_panel_enable(panel->panel);
+}
 
-	if (mode != DRM_MODE_DPMS_ON)
-		drm_panel_disable(panel->panel);
-	else
-		drm_panel_enable(panel->panel);
+static void atmel_hlcdc_panel_encoder_disable(struct drm_encoder *encoder)
+{
+	struct atmel_hlcdc_rgb_output *rgb =
+		drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
+	struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
 
-	rgb->dpms = mode;
+	drm_panel_disable(panel->panel);
 }
 
 static bool
@@ -115,16 +112,6 @@ atmel_hlcdc_panel_encoder_mode_fixup(struct drm_encoder *encoder,
 	return true;
 }
 
-static void atmel_hlcdc_panel_encoder_prepare(struct drm_encoder *encoder)
-{
-	atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void atmel_hlcdc_panel_encoder_commit(struct drm_encoder *encoder)
-{
-	atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
 static void
 atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
 				 struct drm_display_mode *mode,
@@ -156,11 +143,10 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
 }
 
 static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
-	.dpms = atmel_hlcdc_panel_encoder_dpms,
 	.mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
-	.prepare = atmel_hlcdc_panel_encoder_prepare,
-	.commit = atmel_hlcdc_panel_encoder_commit,
 	.mode_set = atmel_hlcdc_rgb_encoder_mode_set,
+	.disable = atmel_hlcdc_panel_encoder_disable,
+	.enable = atmel_hlcdc_panel_encoder_enable,
 };
 
 static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder)
@@ -226,10 +212,13 @@ atmel_hlcdc_panel_connector_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = atmel_hlcdc_panel_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = atmel_hlcdc_panel_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index c5892dcfd745..be9fa8220499 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -19,6 +19,59 @@
 
 #include "atmel_hlcdc_dc.h"
 
+/**
+ * Atmel HLCDC Plane state structure.
+ *
+ * @base: DRM plane state
+ * @crtc_x: x position of the plane relative to the CRTC
+ * @crtc_y: y position of the plane relative to the CRTC
+ * @crtc_w: visible width of the plane
+ * @crtc_h: visible height of the plane
+ * @src_x: x buffer position
+ * @src_y: y buffer position
+ * @src_w: buffer width
+ * @src_h: buffer height
+ * @alpha: alpha blending of the plane
+ * @bpp: bytes per pixel deduced from pixel_format
+ * @offsets: offsets to apply to the GEM buffers
+ * @xstride: value to add to the pixel pointer between each line
+ * @pstride: value to add to the pixel pointer between each pixel
+ * @nplanes: number of planes (deduced from pixel_format)
+ */
+struct atmel_hlcdc_plane_state {
+	struct drm_plane_state base;
+	int crtc_x;
+	int crtc_y;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
+	uint32_t src_x;
+	uint32_t src_y;
+	uint32_t src_w;
+	uint32_t src_h;
+
+	u8 alpha;
+
+	bool disc_updated;
+
+	int disc_x;
+	int disc_y;
+	int disc_w;
+	int disc_h;
+
+	/* These fields are private and should not be touched */
+	int bpp[ATMEL_HLCDC_MAX_PLANES];
+	unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
+	int xstride[ATMEL_HLCDC_MAX_PLANES];
+	int pstride[ATMEL_HLCDC_MAX_PLANES];
+	int nplanes;
+};
+
+static inline struct atmel_hlcdc_plane_state *
+drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s)
+{
+	return container_of(s, struct atmel_hlcdc_plane_state, base);
+}
+
 #define SUBPIXEL_MASK			0xffff
 
 static uint32_t rgb_formats[] = {
@@ -128,7 +181,7 @@ static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
 	return 0;
 }
 
-static bool atmel_hlcdc_format_embedds_alpha(u32 format)
+static bool atmel_hlcdc_format_embeds_alpha(u32 format)
 {
 	int i;
 
@@ -204,7 +257,7 @@ static u32 heo_upscaling_ycoef[] = {
 
 static void
 atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
-				      struct atmel_hlcdc_plane_update_req *req)
+				      struct atmel_hlcdc_plane_state *state)
 {
 	const struct atmel_hlcdc_layer_cfg_layout *layout =
 		&plane->layer.desc->layout;
@@ -213,69 +266,69 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
 	atmel_hlcdc_layer_update_cfg(&plane->layer,
 				     layout->size,
 				     0xffffffff,
-				     (req->crtc_w - 1) |
-				     ((req->crtc_h - 1) << 16));
+				     (state->crtc_w - 1) |
+				     ((state->crtc_h - 1) << 16));
 
 	if (layout->memsize)
 		atmel_hlcdc_layer_update_cfg(&plane->layer,
 					     layout->memsize,
 					     0xffffffff,
-					     (req->src_w - 1) |
-					     ((req->src_h - 1) << 16));
+					     (state->src_w - 1) |
+					     ((state->src_h - 1) << 16));
 
 	if (layout->pos)
 		atmel_hlcdc_layer_update_cfg(&plane->layer,
 					     layout->pos,
 					     0xffffffff,
-					     req->crtc_x |
-					     (req->crtc_y << 16));
+					     state->crtc_x |
+					     (state->crtc_y << 16));
 
 	/* TODO: rework the rescaling part */
-	if (req->crtc_w != req->src_w || req->crtc_h != req->src_h) {
+	if (state->crtc_w != state->src_w || state->crtc_h != state->src_h) {
 		u32 factor_reg = 0;
 
-		if (req->crtc_w != req->src_w) {
+		if (state->crtc_w != state->src_w) {
 			int i;
 			u32 factor;
 			u32 *coeff_tab = heo_upscaling_xcoef;
 			u32 max_memsize;
 
-			if (req->crtc_w < req->src_w)
+			if (state->crtc_w < state->src_w)
 				coeff_tab = heo_downscaling_xcoef;
 			for (i = 0; i < ARRAY_SIZE(heo_upscaling_xcoef); i++)
 				atmel_hlcdc_layer_update_cfg(&plane->layer,
 							     17 + i,
 							     0xffffffff,
 							     coeff_tab[i]);
-			factor = ((8 * 256 * req->src_w) - (256 * 4)) /
-				 req->crtc_w;
+			factor = ((8 * 256 * state->src_w) - (256 * 4)) /
+				 state->crtc_w;
 			factor++;
-			max_memsize = ((factor * req->crtc_w) + (256 * 4)) /
+			max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
 				      2048;
-			if (max_memsize > req->src_w)
+			if (max_memsize > state->src_w)
 				factor--;
 			factor_reg |= factor | 0x80000000;
 		}
 
-		if (req->crtc_h != req->src_h) {
+		if (state->crtc_h != state->src_h) {
 			int i;
 			u32 factor;
 			u32 *coeff_tab = heo_upscaling_ycoef;
 			u32 max_memsize;
 
-			if (req->crtc_w < req->src_w)
+			if (state->crtc_w < state->src_w)
 				coeff_tab = heo_downscaling_ycoef;
 			for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
 				atmel_hlcdc_layer_update_cfg(&plane->layer,
 							     33 + i,
 							     0xffffffff,
 							     coeff_tab[i]);
-			factor = ((8 * 256 * req->src_w) - (256 * 4)) /
-				 req->crtc_w;
+			factor = ((8 * 256 * state->src_w) - (256 * 4)) /
+				 state->crtc_w;
 			factor++;
-			max_memsize = ((factor * req->crtc_w) + (256 * 4)) /
+			max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
 				      2048;
-			if (max_memsize > req->src_w)
+			if (max_memsize > state->src_w)
 				factor--;
 			factor_reg |= (factor << 16) | 0x80000000;
 		}
@@ -287,7 +340,7 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
 
 static void
 atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
-					  struct atmel_hlcdc_plane_update_req *req)
+					  struct atmel_hlcdc_plane_state *state)
 {
 	const struct atmel_hlcdc_layer_cfg_layout *layout =
 		&plane->layer.desc->layout;
@@ -297,10 +350,11 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
 		cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
 		       ATMEL_HLCDC_LAYER_ITER;
 
-		if (atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format))
+		if (atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format))
 			cfg |= ATMEL_HLCDC_LAYER_LAEN;
 		else
-			cfg |= ATMEL_HLCDC_LAYER_GAEN;
+			cfg |= ATMEL_HLCDC_LAYER_GAEN |
+			       ATMEL_HLCDC_LAYER_GA(state->alpha);
 	}
 
 	atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -312,24 +366,26 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
 				     ATMEL_HLCDC_LAYER_ITER2BL |
 				     ATMEL_HLCDC_LAYER_ITER |
 				     ATMEL_HLCDC_LAYER_GAEN |
+				     ATMEL_HLCDC_LAYER_GA_MASK |
 				     ATMEL_HLCDC_LAYER_LAEN |
 				     ATMEL_HLCDC_LAYER_OVR |
 				     ATMEL_HLCDC_LAYER_DMA, cfg);
 }
 
 static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
-					    struct atmel_hlcdc_plane_update_req *req)
+					    struct atmel_hlcdc_plane_state *state)
 {
 	u32 cfg;
 	int ret;
 
-	ret = atmel_hlcdc_format_to_plane_mode(req->fb->pixel_format, &cfg);
+	ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->pixel_format,
+					       &cfg);
 	if (ret)
 		return;
 
-	if ((req->fb->pixel_format == DRM_FORMAT_YUV422 ||
-	     req->fb->pixel_format == DRM_FORMAT_NV61) &&
-	    (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
+	if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
+	     state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
+	    (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
 		cfg |= ATMEL_HLCDC_YUV422ROT;
 
 	atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -341,7 +397,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
 	 * Rotation optimization is not working on RGB888 (rotation is still
 	 * working but without any optimization).
 	 */
-	if (req->fb->pixel_format == DRM_FORMAT_RGB888)
+	if (state->base.fb->pixel_format == DRM_FORMAT_RGB888)
 		cfg = ATMEL_HLCDC_LAYER_DMA_ROTDIS;
 	else
 		cfg = 0;
@@ -352,73 +408,142 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
 }
 
 static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
-					     struct atmel_hlcdc_plane_update_req *req)
+					     struct atmel_hlcdc_plane_state *state)
 {
 	struct atmel_hlcdc_layer *layer = &plane->layer;
 	const struct atmel_hlcdc_layer_cfg_layout *layout =
 		&layer->desc->layout;
 	int i;
 
-	atmel_hlcdc_layer_update_set_fb(&plane->layer, req->fb, req->offsets);
+	atmel_hlcdc_layer_update_set_fb(&plane->layer, state->base.fb,
+					state->offsets);
 
-	for (i = 0; i < req->nplanes; i++) {
+	for (i = 0; i < state->nplanes; i++) {
 		if (layout->xstride[i]) {
 			atmel_hlcdc_layer_update_cfg(&plane->layer,
 						     layout->xstride[i],
 						     0xffffffff,
-						     req->xstride[i]);
+						     state->xstride[i]);
 		}
 
 		if (layout->pstride[i]) {
 			atmel_hlcdc_layer_update_cfg(&plane->layer,
 						     layout->pstride[i],
 						     0xffffffff,
-						     req->pstride[i]);
+						     state->pstride[i]);
 		}
 	}
 }
 
-static int atmel_hlcdc_plane_check_update_req(struct drm_plane *p,
-					      struct atmel_hlcdc_plane_update_req *req,
-					      const struct drm_display_mode *mode)
+int
+atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
 {
-	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-	const struct atmel_hlcdc_layer_cfg_layout *layout =
-		&plane->layer.desc->layout;
+	int disc_x = 0, disc_y = 0, disc_w = 0, disc_h = 0;
+	const struct atmel_hlcdc_layer_cfg_layout *layout;
+	struct atmel_hlcdc_plane_state *primary_state;
+	struct drm_plane_state *primary_s;
+	struct atmel_hlcdc_plane *primary;
+	struct drm_plane *ovl;
+
+	primary = drm_plane_to_atmel_hlcdc_plane(c_state->crtc->primary);
+	layout = &primary->layer.desc->layout;
+	if (!layout->disc_pos || !layout->disc_size)
+		return 0;
+
+	primary_s = drm_atomic_get_plane_state(c_state->state,
+					       &primary->base);
+	if (IS_ERR(primary_s))
+		return PTR_ERR(primary_s);
+
+	primary_state = drm_plane_state_to_atmel_hlcdc_plane_state(primary_s);
+
+	drm_atomic_crtc_state_for_each_plane(ovl, c_state) {
+		struct atmel_hlcdc_plane_state *ovl_state;
+		struct drm_plane_state *ovl_s;
+
+		if (ovl == c_state->crtc->primary)
+			continue;
 
-	if (!layout->size &&
-	    (mode->hdisplay != req->crtc_w ||
-	     mode->vdisplay != req->crtc_h))
-		return -EINVAL;
+		ovl_s = drm_atomic_get_plane_state(c_state->state, ovl);
+		if (IS_ERR(ovl_s))
+			return PTR_ERR(ovl_s);
 
-	if (plane->layer.desc->max_height &&
-	    req->crtc_h > plane->layer.desc->max_height)
-		return -EINVAL;
+		ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s);
 
-	if (plane->layer.desc->max_width &&
-	    req->crtc_w > plane->layer.desc->max_width)
-		return -EINVAL;
+		if (!ovl_s->fb ||
+		    atmel_hlcdc_format_embeds_alpha(ovl_s->fb->pixel_format) ||
+		    ovl_state->alpha != 255)
+			continue;
 
-	if ((req->crtc_h != req->src_h || req->crtc_w != req->src_w) &&
-	    (!layout->memsize ||
-	     atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format)))
-		return -EINVAL;
+		/* TODO: implement a smarter hidden area detection */
+		if (ovl_state->crtc_h * ovl_state->crtc_w < disc_h * disc_w)
+			continue;
 
-	if (req->crtc_x < 0 || req->crtc_y < 0)
-		return -EINVAL;
+		disc_x = ovl_state->crtc_x;
+		disc_y = ovl_state->crtc_y;
+		disc_h = ovl_state->crtc_h;
+		disc_w = ovl_state->crtc_w;
+	}
 
-	if (req->crtc_w + req->crtc_x > mode->hdisplay ||
-	    req->crtc_h + req->crtc_y > mode->vdisplay)
-		return -EINVAL;
+	if (disc_x == primary_state->disc_x &&
+	    disc_y == primary_state->disc_y &&
+	    disc_w == primary_state->disc_w &&
+	    disc_h == primary_state->disc_h)
+		return 0;
+
+
+	primary_state->disc_x = disc_x;
+	primary_state->disc_y = disc_y;
+	primary_state->disc_w = disc_w;
+	primary_state->disc_h = disc_h;
+	primary_state->disc_updated = true;
 
 	return 0;
 }
 
-int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
-					 struct atmel_hlcdc_plane_update_req *req,
-					 const struct drm_display_mode *mode)
+static void
+atmel_hlcdc_plane_update_disc_area(struct atmel_hlcdc_plane *plane,
+				   struct atmel_hlcdc_plane_state *state)
+{
+	const struct atmel_hlcdc_layer_cfg_layout *layout =
+		&plane->layer.desc->layout;
+	int disc_surface = 0;
+
+	if (!state->disc_updated)
+		return;
+
+	disc_surface = state->disc_h * state->disc_w;
+
+	atmel_hlcdc_layer_update_cfg(&plane->layer, layout->general_config,
+				     ATMEL_HLCDC_LAYER_DISCEN,
+				     disc_surface ? ATMEL_HLCDC_LAYER_DISCEN : 0);
+
+	if (!disc_surface)
+		return;
+
+	atmel_hlcdc_layer_update_cfg(&plane->layer,
+				     layout->disc_pos,
+				     0xffffffff,
+				     state->disc_x | (state->disc_y << 16));
+
+	atmel_hlcdc_layer_update_cfg(&plane->layer,
+				     layout->disc_size,
+				     0xffffffff,
+				     (state->disc_w - 1) |
+				     ((state->disc_h - 1) << 16));
+}
+
+static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
+					  struct drm_plane_state *s)
 {
 	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+	struct atmel_hlcdc_plane_state *state =
+		drm_plane_state_to_atmel_hlcdc_plane_state(s);
+	const struct atmel_hlcdc_layer_cfg_layout *layout =
+		&plane->layer.desc->layout;
+	struct drm_framebuffer *fb = state->base.fb;
+	const struct drm_display_mode *mode;
+	struct drm_crtc_state *crtc_state;
 	unsigned int patched_crtc_w;
 	unsigned int patched_crtc_h;
 	unsigned int patched_src_w;
@@ -430,196 +555,196 @@ int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
430 int vsub = 1; 555 int vsub = 1;
431 int i; 556 int i;
432 557
433 if ((req->src_x | req->src_y | req->src_w | req->src_h) & 558 if (!state->base.crtc || !fb)
559 return 0;
560
561 crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)];
562 mode = &crtc_state->adjusted_mode;
563
564 state->src_x = s->src_x;
565 state->src_y = s->src_y;
566 state->src_h = s->src_h;
567 state->src_w = s->src_w;
568 state->crtc_x = s->crtc_x;
569 state->crtc_y = s->crtc_y;
570 state->crtc_h = s->crtc_h;
571 state->crtc_w = s->crtc_w;
572 if ((state->src_x | state->src_y | state->src_w | state->src_h) &
434 SUBPIXEL_MASK) 573 SUBPIXEL_MASK)
435 return -EINVAL; 574 return -EINVAL;
436 575
437 req->src_x >>= 16; 576 state->src_x >>= 16;
438 req->src_y >>= 16; 577 state->src_y >>= 16;
439 req->src_w >>= 16; 578 state->src_w >>= 16;
440 req->src_h >>= 16; 579 state->src_h >>= 16;
441 580
442 req->nplanes = drm_format_num_planes(req->fb->pixel_format); 581 state->nplanes = drm_format_num_planes(fb->pixel_format);
443 if (req->nplanes > ATMEL_HLCDC_MAX_PLANES) 582 if (state->nplanes > ATMEL_HLCDC_MAX_PLANES)
444 return -EINVAL; 583 return -EINVAL;
445 584
446 /* 585 /*
447 * Swap width and size in case of 90 or 270 degrees rotation 586 * Swap width and size in case of 90 or 270 degrees rotation
448 */ 587 */
449 if (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { 588 if (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
450 tmp = req->crtc_w; 589 tmp = state->crtc_w;
451 req->crtc_w = req->crtc_h; 590 state->crtc_w = state->crtc_h;
452 req->crtc_h = tmp; 591 state->crtc_h = tmp;
453 tmp = req->src_w; 592 tmp = state->src_w;
454 req->src_w = req->src_h; 593 state->src_w = state->src_h;
455 req->src_h = tmp; 594 state->src_h = tmp;
456 } 595 }
457 596
458 if (req->crtc_x + req->crtc_w > mode->hdisplay) 597 if (state->crtc_x + state->crtc_w > mode->hdisplay)
459 patched_crtc_w = mode->hdisplay - req->crtc_x; 598 patched_crtc_w = mode->hdisplay - state->crtc_x;
460 else 599 else
461 patched_crtc_w = req->crtc_w; 600 patched_crtc_w = state->crtc_w;
462 601
463 if (req->crtc_x < 0) { 602 if (state->crtc_x < 0) {
464 patched_crtc_w += req->crtc_x; 603 patched_crtc_w += state->crtc_x;
465 x_offset = -req->crtc_x; 604 x_offset = -state->crtc_x;
466 req->crtc_x = 0; 605 state->crtc_x = 0;
467 } 606 }
468 607
469 if (req->crtc_y + req->crtc_h > mode->vdisplay) 608 if (state->crtc_y + state->crtc_h > mode->vdisplay)
470 patched_crtc_h = mode->vdisplay - req->crtc_y; 609 patched_crtc_h = mode->vdisplay - state->crtc_y;
471 else 610 else
472 patched_crtc_h = req->crtc_h; 611 patched_crtc_h = state->crtc_h;
473 612
474 if (req->crtc_y < 0) { 613 if (state->crtc_y < 0) {
475 patched_crtc_h += req->crtc_y; 614 patched_crtc_h += state->crtc_y;
476 y_offset = -req->crtc_y; 615 y_offset = -state->crtc_y;
477 req->crtc_y = 0; 616 state->crtc_y = 0;
478 } 617 }
479 618
480 patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * req->src_w, 619 patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * state->src_w,
481 req->crtc_w); 620 state->crtc_w);
482 patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * req->src_h, 621 patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * state->src_h,
483 req->crtc_h); 622 state->crtc_h);
484 623
485 hsub = drm_format_horz_chroma_subsampling(req->fb->pixel_format); 624 hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
486 vsub = drm_format_vert_chroma_subsampling(req->fb->pixel_format); 625 vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
487 626
488 for (i = 0; i < req->nplanes; i++) { 627 for (i = 0; i < state->nplanes; i++) {
489 unsigned int offset = 0; 628 unsigned int offset = 0;
490 int xdiv = i ? hsub : 1; 629 int xdiv = i ? hsub : 1;
491 int ydiv = i ? vsub : 1; 630 int ydiv = i ? vsub : 1;
492 631
493 req->bpp[i] = drm_format_plane_cpp(req->fb->pixel_format, i); 632 state->bpp[i] = drm_format_plane_cpp(fb->pixel_format, i);
494 if (!req->bpp[i]) 633 if (!state->bpp[i])
495 return -EINVAL; 634 return -EINVAL;
496 635
497 switch (plane->rotation & 0xf) { 636 switch (state->base.rotation & 0xf) {
498 case BIT(DRM_ROTATE_90): 637 case BIT(DRM_ROTATE_90):
499 offset = ((y_offset + req->src_y + patched_src_w - 1) / 638 offset = ((y_offset + state->src_y + patched_src_w - 1) /
500 ydiv) * req->fb->pitches[i]; 639 ydiv) * fb->pitches[i];
501 offset += ((x_offset + req->src_x) / xdiv) * 640 offset += ((x_offset + state->src_x) / xdiv) *
502 req->bpp[i]; 641 state->bpp[i];
503 req->xstride[i] = ((patched_src_w - 1) / ydiv) * 642 state->xstride[i] = ((patched_src_w - 1) / ydiv) *
504 req->fb->pitches[i]; 643 fb->pitches[i];
505 req->pstride[i] = -req->fb->pitches[i] - req->bpp[i]; 644 state->pstride[i] = -fb->pitches[i] - state->bpp[i];
506 break; 645 break;
507 case BIT(DRM_ROTATE_180): 646 case BIT(DRM_ROTATE_180):
508 offset = ((y_offset + req->src_y + patched_src_h - 1) / 647 offset = ((y_offset + state->src_y + patched_src_h - 1) /
509 ydiv) * req->fb->pitches[i]; 648 ydiv) * fb->pitches[i];
510 offset += ((x_offset + req->src_x + patched_src_w - 1) / 649 offset += ((x_offset + state->src_x + patched_src_w - 1) /
511 xdiv) * req->bpp[i]; 650 xdiv) * state->bpp[i];
512 req->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) * 651 state->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) *
513 req->bpp[i]) - req->fb->pitches[i]; 652 state->bpp[i]) - fb->pitches[i];
514 req->pstride[i] = -2 * req->bpp[i]; 653 state->pstride[i] = -2 * state->bpp[i];
515 break; 654 break;
516 case BIT(DRM_ROTATE_270): 655 case BIT(DRM_ROTATE_270):
517 offset = ((y_offset + req->src_y) / ydiv) * 656 offset = ((y_offset + state->src_y) / ydiv) *
518 req->fb->pitches[i]; 657 fb->pitches[i];
519 offset += ((x_offset + req->src_x + patched_src_h - 1) / 658 offset += ((x_offset + state->src_x + patched_src_h - 1) /
520 xdiv) * req->bpp[i]; 659 xdiv) * state->bpp[i];
521 req->xstride[i] = -(((patched_src_w - 1) / ydiv) * 660 state->xstride[i] = -(((patched_src_w - 1) / ydiv) *
522 req->fb->pitches[i]) - 661 fb->pitches[i]) -
523 (2 * req->bpp[i]); 662 (2 * state->bpp[i]);
524 req->pstride[i] = req->fb->pitches[i] - req->bpp[i]; 663 state->pstride[i] = fb->pitches[i] - state->bpp[i];
525 break; 664 break;
526 case BIT(DRM_ROTATE_0): 665 case BIT(DRM_ROTATE_0):
527 default: 666 default:
528 offset = ((y_offset + req->src_y) / ydiv) * 667 offset = ((y_offset + state->src_y) / ydiv) *
529 req->fb->pitches[i]; 668 fb->pitches[i];
530 offset += ((x_offset + req->src_x) / xdiv) * 669 offset += ((x_offset + state->src_x) / xdiv) *
531 req->bpp[i]; 670 state->bpp[i];
532 req->xstride[i] = req->fb->pitches[i] - 671 state->xstride[i] = fb->pitches[i] -
533 ((patched_src_w / xdiv) * 672 ((patched_src_w / xdiv) *
534 req->bpp[i]); 673 state->bpp[i]);
535 req->pstride[i] = 0; 674 state->pstride[i] = 0;
536 break; 675 break;
537 } 676 }
538 677
539 req->offsets[i] = offset + req->fb->offsets[i]; 678 state->offsets[i] = offset + fb->offsets[i];
540 } 679 }
541 680
542 req->src_w = patched_src_w; 681 state->src_w = patched_src_w;
543 req->src_h = patched_src_h; 682 state->src_h = patched_src_h;
544 req->crtc_w = patched_crtc_w; 683 state->crtc_w = patched_crtc_w;
545 req->crtc_h = patched_crtc_h; 684 state->crtc_h = patched_crtc_h;
546 685
547 return atmel_hlcdc_plane_check_update_req(p, req, mode); 686 if (!layout->size &&
548} 687 (mode->hdisplay != state->crtc_w ||
688 mode->vdisplay != state->crtc_h))
689 return -EINVAL;
549 690
550int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p, 691 if (plane->layer.desc->max_height &&
551 struct atmel_hlcdc_plane_update_req *req) 692 state->crtc_h > plane->layer.desc->max_height)
552{ 693 return -EINVAL;
553 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
554 int ret;
555 694
556 ret = atmel_hlcdc_layer_update_start(&plane->layer); 695 if (plane->layer.desc->max_width &&
557 if (ret) 696 state->crtc_w > plane->layer.desc->max_width)
558 return ret; 697 return -EINVAL;
559 698
560 atmel_hlcdc_plane_update_pos_and_size(plane, req); 699 if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) &&
561 atmel_hlcdc_plane_update_general_settings(plane, req); 700 (!layout->memsize ||
562 atmel_hlcdc_plane_update_format(plane, req); 701 atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format)))
563 atmel_hlcdc_plane_update_buffers(plane, req); 702 return -EINVAL;
564 703
565 atmel_hlcdc_layer_update_commit(&plane->layer); 704 if (state->crtc_x < 0 || state->crtc_y < 0)
705 return -EINVAL;
706
707 if (state->crtc_w + state->crtc_x > mode->hdisplay ||
708 state->crtc_h + state->crtc_y > mode->vdisplay)
709 return -EINVAL;
566 710
567 return 0; 711 return 0;
568} 712}
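
The rotation switch above derives, for each plane of the framebuffer, the byte offset of the first sample the layer DMA fetches, plus the xstride/pstride values that make it walk the buffer in rotated order (for 90 degrees it starts at the bottom-left sample of the source window and steps through it column-wise). A minimal sketch of the ROTATE_0 and ROTATE_90 offset math, assuming the clip offsets (x_offset/y_offset) are already folded into src_x/src_y; the function name and parameters are illustrative, not part of the driver:

/*
 * Sketch only: first-sample byte offset for the DRM_ROTATE_0 and
 * DRM_ROTATE_90 cases above. 'cpp' is bytes per sample (state->bpp[i]
 * in the driver), 'pitch' is fb->pitches[i], xdiv/ydiv the chroma
 * subsampling divisors. Names are illustrative.
 */
static unsigned int first_sample_offset(unsigned int rotation,
					unsigned int src_x, unsigned int src_y,
					unsigned int src_w,
					unsigned int pitch, unsigned int cpp,
					int xdiv, int ydiv)
{
	if (rotation == BIT(DRM_ROTATE_90))
		/* bottom-left sample of the source window */
		return ((src_y + src_w - 1) / ydiv) * pitch +
		       (src_x / xdiv) * cpp;

	/* DRM_ROTATE_0: top-left sample */
	return (src_y / ydiv) * pitch + (src_x / xdiv) * cpp;
}
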
569 713
570int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p, 714static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
571 struct drm_crtc *crtc, 715 struct drm_framebuffer *fb,
572 struct drm_framebuffer *fb, 716 const struct drm_plane_state *new_state)
573 int crtc_x, int crtc_y,
574 unsigned int crtc_w,
575 unsigned int crtc_h,
576 uint32_t src_x, uint32_t src_y,
577 uint32_t src_w, uint32_t src_h,
578 const struct drm_display_mode *mode)
579{ 717{
580 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); 718 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
581 struct atmel_hlcdc_plane_update_req req;
582 int ret = 0;
583
584 memset(&req, 0, sizeof(req));
585 req.crtc_x = crtc_x;
586 req.crtc_y = crtc_y;
587 req.crtc_w = crtc_w;
588 req.crtc_h = crtc_h;
589 req.src_x = src_x;
590 req.src_y = src_y;
591 req.src_w = src_w;
592 req.src_h = src_h;
593 req.fb = fb;
594
595 ret = atmel_hlcdc_plane_prepare_update_req(&plane->base, &req, mode);
596 if (ret)
597 return ret;
598 719
599 if (!req.crtc_h || !req.crtc_w) 720 return atmel_hlcdc_layer_update_start(&plane->layer);
600 return atmel_hlcdc_layer_disable(&plane->layer);
601
602 return atmel_hlcdc_plane_apply_update_req(&plane->base, &req);
603} 721}
604 722
605static int atmel_hlcdc_plane_update(struct drm_plane *p, 723static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
606 struct drm_crtc *crtc, 724 struct drm_plane_state *old_s)
607 struct drm_framebuffer *fb,
608 int crtc_x, int crtc_y,
609 unsigned int crtc_w, unsigned int crtc_h,
610 uint32_t src_x, uint32_t src_y,
611 uint32_t src_w, uint32_t src_h)
612{ 725{
613 return atmel_hlcdc_plane_update_with_mode(p, crtc, fb, crtc_x, crtc_y, 726 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
614 crtc_w, crtc_h, src_x, src_y, 727 struct atmel_hlcdc_plane_state *state =
615 src_w, src_h, &crtc->hwmode); 728 drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
729
730 if (!p->state->crtc || !p->state->fb)
731 return;
732
733 atmel_hlcdc_plane_update_pos_and_size(plane, state);
734 atmel_hlcdc_plane_update_general_settings(plane, state);
735 atmel_hlcdc_plane_update_format(plane, state);
736 atmel_hlcdc_plane_update_buffers(plane, state);
737 atmel_hlcdc_plane_update_disc_area(plane, state);
738
739 atmel_hlcdc_layer_update_commit(&plane->layer);
616} 740}
617 741
618static int atmel_hlcdc_plane_disable(struct drm_plane *p) 742static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
743 struct drm_plane_state *old_state)
619{ 744{
620 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); 745 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
621 746
622 return atmel_hlcdc_layer_disable(&plane->layer); 747 atmel_hlcdc_layer_disable(&plane->layer);
623} 748}
624 749
625static void atmel_hlcdc_plane_destroy(struct drm_plane *p) 750static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
@@ -635,38 +760,36 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
635 devm_kfree(p->dev->dev, plane); 760 devm_kfree(p->dev->dev, plane);
636} 761}
637 762
638static int atmel_hlcdc_plane_set_alpha(struct atmel_hlcdc_plane *plane, 763static int atmel_hlcdc_plane_atomic_set_property(struct drm_plane *p,
639 u8 alpha) 764 struct drm_plane_state *s,
765 struct drm_property *property,
766 uint64_t val)
640{ 767{
641 atmel_hlcdc_layer_update_start(&plane->layer); 768 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
642 atmel_hlcdc_layer_update_cfg(&plane->layer, 769 struct atmel_hlcdc_plane_properties *props = plane->properties;
643 plane->layer.desc->layout.general_config, 770 struct atmel_hlcdc_plane_state *state =
644 ATMEL_HLCDC_LAYER_GA_MASK, 771 drm_plane_state_to_atmel_hlcdc_plane_state(s);
645 alpha << ATMEL_HLCDC_LAYER_GA_SHIFT);
646 atmel_hlcdc_layer_update_commit(&plane->layer);
647
648 return 0;
649}
650 772
651static int atmel_hlcdc_plane_set_rotation(struct atmel_hlcdc_plane *plane, 773 if (property == props->alpha)
652 unsigned int rotation) 774 state->alpha = val;
653{ 775 else
654 plane->rotation = rotation; 776 return -EINVAL;
655 777
656 return 0; 778 return 0;
657} 779}
658 780
659static int atmel_hlcdc_plane_set_property(struct drm_plane *p, 781static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
660 struct drm_property *property, 782 const struct drm_plane_state *s,
661 uint64_t value) 783 struct drm_property *property,
784 uint64_t *val)
662{ 785{
663 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); 786 struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
664 struct atmel_hlcdc_plane_properties *props = plane->properties; 787 struct atmel_hlcdc_plane_properties *props = plane->properties;
788 const struct atmel_hlcdc_plane_state *state =
789 container_of(s, const struct atmel_hlcdc_plane_state, base);
665 790
666 if (property == props->alpha) 791 if (property == props->alpha)
667 atmel_hlcdc_plane_set_alpha(plane, value); 792 *val = state->alpha;
668 else if (property == props->rotation)
669 atmel_hlcdc_plane_set_rotation(plane, value);
670 else 793 else
671 return -EINVAL; 794 return -EINVAL;
672 795
@@ -694,8 +817,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
694 817
695 if (desc->layout.xstride && desc->layout.pstride) 818 if (desc->layout.xstride && desc->layout.pstride)
696 drm_object_attach_property(&plane->base.base, 819 drm_object_attach_property(&plane->base.base,
697 props->rotation, 820 plane->base.dev->mode_config.rotation_property,
698 BIT(DRM_ROTATE_0)); 821 BIT(DRM_ROTATE_0));
699 822
700 if (desc->layout.csc) { 823 if (desc->layout.csc) {
701 /* 824 /*
@@ -717,11 +840,76 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
717 } 840 }
718} 841}
719 842
843static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
844 .prepare_fb = atmel_hlcdc_plane_prepare_fb,
845 .atomic_check = atmel_hlcdc_plane_atomic_check,
846 .atomic_update = atmel_hlcdc_plane_atomic_update,
847 .atomic_disable = atmel_hlcdc_plane_atomic_disable,
848};
849
850static void atmel_hlcdc_plane_reset(struct drm_plane *p)
851{
852 struct atmel_hlcdc_plane_state *state;
853
854 if (p->state) {
855 state = drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
856
857 if (state->base.fb)
858 drm_framebuffer_unreference(state->base.fb);
859
860 kfree(state);
861 p->state = NULL;
862 }
863
864 state = kzalloc(sizeof(*state), GFP_KERNEL);
865 if (state) {
866 state->alpha = 255;
867 p->state = &state->base;
868 p->state->plane = p;
869 }
870}
871
872static struct drm_plane_state *
873atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p)
874{
875 struct atmel_hlcdc_plane_state *state =
876 drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
877 struct atmel_hlcdc_plane_state *copy;
878
879 copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
880 if (!copy)
881 return NULL;
882
883 copy->disc_updated = false;
884
885 if (copy->base.fb)
886 drm_framebuffer_reference(copy->base.fb);
887
888 return &copy->base;
889}
890
891static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *plane,
892 struct drm_plane_state *s)
893{
894 struct atmel_hlcdc_plane_state *state =
895 drm_plane_state_to_atmel_hlcdc_plane_state(s);
896
897 if (s->fb)
898 drm_framebuffer_unreference(s->fb);
899
900 kfree(state);
901}
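
The reset/duplicate/destroy hooks above follow the standard pattern for drivers that subclass struct drm_plane_state: embed the base state as the first member so container_of() works, deep-copy the whole subclass on duplicate, and balance the framebuffer reference the base state carries. A minimal sketch of the same pattern, where foo_plane_state and its 'foo' field are hypothetical names, not atmel-hlcdc ones:

/* Sketch of the subclassed plane-state pattern used above. */
struct foo_plane_state {
	struct drm_plane_state base;	/* must come first for container_of() */
	unsigned int foo;		/* hypothetical driver-private field */
};

static struct drm_plane_state *foo_plane_duplicate_state(struct drm_plane *p)
{
	struct foo_plane_state *state =
		container_of(p->state, struct foo_plane_state, base);
	struct foo_plane_state *copy;

	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
	if (!copy)
		return NULL;

	/* the duplicate holds its own reference on the framebuffer */
	if (copy->base.fb)
		drm_framebuffer_reference(copy->base.fb);

	return &copy->base;
}
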
902
720static struct drm_plane_funcs layer_plane_funcs = { 903static struct drm_plane_funcs layer_plane_funcs = {
721 .update_plane = atmel_hlcdc_plane_update, 904 .update_plane = drm_atomic_helper_update_plane,
722 .disable_plane = atmel_hlcdc_plane_disable, 905 .disable_plane = drm_atomic_helper_disable_plane,
723 .set_property = atmel_hlcdc_plane_set_property, 906 .set_property = drm_atomic_helper_plane_set_property,
724 .destroy = atmel_hlcdc_plane_destroy, 907 .destroy = atmel_hlcdc_plane_destroy,
908 .reset = atmel_hlcdc_plane_reset,
909 .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
910 .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
911 .atomic_set_property = atmel_hlcdc_plane_atomic_set_property,
912 .atomic_get_property = atmel_hlcdc_plane_atomic_get_property,
725}; 913};
726 914
727static struct atmel_hlcdc_plane * 915static struct atmel_hlcdc_plane *
@@ -755,6 +943,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
755 if (ret) 943 if (ret)
756 return ERR_PTR(ret); 944 return ERR_PTR(ret);
757 945
946 drm_plane_helper_add(&plane->base,
947 &atmel_hlcdc_layer_plane_helper_funcs);
948
758 /* Set default property values */ 949 /* Set default property values */
759 atmel_hlcdc_plane_init_properties(plane, desc, props); 950 atmel_hlcdc_plane_init_properties(plane, desc, props);
760 951
@@ -774,12 +965,13 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
774 if (!props->alpha) 965 if (!props->alpha)
775 return ERR_PTR(-ENOMEM); 966 return ERR_PTR(-ENOMEM);
776 967
777 props->rotation = drm_mode_create_rotation_property(dev, 968 dev->mode_config.rotation_property =
778 BIT(DRM_ROTATE_0) | 969 drm_mode_create_rotation_property(dev,
779 BIT(DRM_ROTATE_90) | 970 BIT(DRM_ROTATE_0) |
780 BIT(DRM_ROTATE_180) | 971 BIT(DRM_ROTATE_90) |
781 BIT(DRM_ROTATE_270)); 972 BIT(DRM_ROTATE_180) |
782 if (!props->rotation) 973 BIT(DRM_ROTATE_270));
974 if (!dev->mode_config.rotation_property)
783 return ERR_PTR(-ENOMEM); 975 return ERR_PTR(-ENOMEM);
784 976
785 return props; 977 return props;
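
The rotation property now lives in dev->mode_config rather than in driver-private storage, so there is one canonical property that core code and userspace can discover; planes that support rotation simply attach it, as the earlier hunk does. A short sketch of the attach step, assuming the property was created during driver init as above:

/* Sketch: attach the shared rotation property to a capable plane. */
if (dev->mode_config.rotation_property)
	drm_object_attach_property(&plane->base,
				   dev->mode_config.rotation_property,
				   BIT(DRM_ROTATE_0));
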
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c2e9c5283136..321e098ddf04 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -92,7 +92,7 @@ drm_atomic_state_alloc(struct drm_device *dev)
92 92
93 state->dev = dev; 93 state->dev = dev;
94 94
95 DRM_DEBUG_KMS("Allocate atomic state %p\n", state); 95 DRM_DEBUG_ATOMIC("Allocate atomic state %p\n", state);
96 96
97 return state; 97 return state;
98fail: 98fail:
@@ -122,7 +122,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
122 struct drm_mode_config *config = &dev->mode_config; 122 struct drm_mode_config *config = &dev->mode_config;
123 int i; 123 int i;
124 124
125 DRM_DEBUG_KMS("Clearing atomic state %p\n", state); 125 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
126 126
127 for (i = 0; i < state->num_connector; i++) { 127 for (i = 0; i < state->num_connector; i++) {
128 struct drm_connector *connector = state->connectors[i]; 128 struct drm_connector *connector = state->connectors[i];
@@ -172,7 +172,7 @@ void drm_atomic_state_free(struct drm_atomic_state *state)
172{ 172{
173 drm_atomic_state_clear(state); 173 drm_atomic_state_clear(state);
174 174
175 DRM_DEBUG_KMS("Freeing atomic state %p\n", state); 175 DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
176 176
177 kfree_state(state); 177 kfree_state(state);
178} 178}
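
The DRM_DEBUG_KMS to DRM_DEBUG_ATOMIC conversion throughout this file moves atomic-state tracing onto its own drm.debug category, so it can be enabled without the rest of the KMS chatter. A sketch of the macro this conversion presumes, following the existing DRM_DEBUG_* pattern in drmP.h; the exact bit value for DRM_UT_ATOMIC is an assumption worth checking against the header:

/* Sketch: new debug class, modelled on the existing DRM_DEBUG_* macros.
 * The 0x10 bit value is an assumption. */
#define DRM_UT_ATOMIC		0x10

#define DRM_DEBUG_ATOMIC(fmt, args...)					\
	do {								\
		if (unlikely(drm_debug & DRM_UT_ATOMIC))		\
			drm_ut_debug_printk(__func__, fmt, ##args);	\
	} while (0)
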
@@ -217,8 +217,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
217 state->crtcs[index] = crtc; 217 state->crtcs[index] = crtc;
218 crtc_state->state = state; 218 crtc_state->state = state;
219 219
220 DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n", 220 DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
221 crtc->base.id, crtc_state, state); 221 crtc->base.id, crtc_state, state);
222 222
223 return crtc_state; 223 return crtc_state;
224} 224}
@@ -293,8 +293,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
293 */ 293 */
294 294
295 if (state->active && !state->enable) { 295 if (state->active && !state->enable) {
296 DRM_DEBUG_KMS("[CRTC:%d] active without enabled\n", 296 DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
297 crtc->base.id); 297 crtc->base.id);
298 return -EINVAL; 298 return -EINVAL;
299 } 299 }
300 300
@@ -340,8 +340,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
340 state->planes[index] = plane; 340 state->planes[index] = plane;
341 plane_state->state = state; 341 plane_state->state = state;
342 342
343 DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n", 343 DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
344 plane->base.id, plane_state, state); 344 plane->base.id, plane_state, state);
345 345
346 if (plane_state->crtc) { 346 if (plane_state->crtc) {
347 struct drm_crtc_state *crtc_state; 347 struct drm_crtc_state *crtc_state;
@@ -477,10 +477,10 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
477 477
478 /* either *both* CRTC and FB must be set, or neither */ 478 /* either *both* CRTC and FB must be set, or neither */
479 if (WARN_ON(state->crtc && !state->fb)) { 479 if (WARN_ON(state->crtc && !state->fb)) {
480 DRM_DEBUG_KMS("CRTC set but no FB\n"); 480 DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
481 return -EINVAL; 481 return -EINVAL;
482 } else if (WARN_ON(state->fb && !state->crtc)) { 482 } else if (WARN_ON(state->fb && !state->crtc)) {
483 DRM_DEBUG_KMS("FB set but no CRTC\n"); 483 DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
484 return -EINVAL; 484 return -EINVAL;
485 } 485 }
486 486
@@ -490,7 +490,7 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
490 490
491 /* Check whether this plane is usable on this CRTC */ 491 /* Check whether this plane is usable on this CRTC */
492 if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) { 492 if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
493 DRM_DEBUG_KMS("Invalid crtc for plane\n"); 493 DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
494 return -EINVAL; 494 return -EINVAL;
495 } 495 }
496 496
@@ -499,8 +499,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
499 if (state->fb->pixel_format == plane->format_types[i]) 499 if (state->fb->pixel_format == plane->format_types[i])
500 break; 500 break;
501 if (i == plane->format_count) { 501 if (i == plane->format_count) {
502 DRM_DEBUG_KMS("Invalid pixel format %s\n", 502 DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
503 drm_get_format_name(state->fb->pixel_format)); 503 drm_get_format_name(state->fb->pixel_format));
504 return -EINVAL; 504 return -EINVAL;
505 } 505 }
506 506
@@ -509,9 +509,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
509 state->crtc_x > INT_MAX - (int32_t) state->crtc_w || 509 state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
510 state->crtc_h > INT_MAX || 510 state->crtc_h > INT_MAX ||
511 state->crtc_y > INT_MAX - (int32_t) state->crtc_h) { 511 state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
512 DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", 512 DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
513 state->crtc_w, state->crtc_h, 513 state->crtc_w, state->crtc_h,
514 state->crtc_x, state->crtc_y); 514 state->crtc_x, state->crtc_y);
515 return -ERANGE; 515 return -ERANGE;
516 } 516 }
517 517
@@ -523,12 +523,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
523 state->src_x > fb_width - state->src_w || 523 state->src_x > fb_width - state->src_w ||
524 state->src_h > fb_height || 524 state->src_h > fb_height ||
525 state->src_y > fb_height - state->src_h) { 525 state->src_y > fb_height - state->src_h) {
526 DRM_DEBUG_KMS("Invalid source coordinates " 526 DRM_DEBUG_ATOMIC("Invalid source coordinates "
527 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", 527 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
528 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, 528 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
529 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, 529 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
530 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, 530 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
531 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10); 531 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
532 return -ENOSPC; 532 return -ENOSPC;
533 } 533 }
534 534
@@ -575,7 +575,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
575 * at most the array is a bit too large. 575 * at most the array is a bit too large.
576 */ 576 */
577 if (index >= state->num_connector) { 577 if (index >= state->num_connector) {
578 DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n"); 578 DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
579 return ERR_PTR(-EAGAIN); 579 return ERR_PTR(-EAGAIN);
580 } 580 }
581 581
@@ -590,8 +590,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
590 state->connectors[index] = connector; 590 state->connectors[index] = connector;
591 connector_state->state = state; 591 connector_state->state = state;
592 592
593 DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n", 593 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
594 connector->base.id, connector_state, state); 594 connector->base.id, connector_state, state);
595 595
596 if (connector_state->crtc) { 596 if (connector_state->crtc) {
597 struct drm_crtc_state *crtc_state; 597 struct drm_crtc_state *crtc_state;
@@ -752,10 +752,11 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
752 } 752 }
753 753
754 if (crtc) 754 if (crtc)
755 DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n", 755 DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
756 plane_state, crtc->base.id); 756 plane_state, crtc->base.id);
757 else 757 else
758 DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state); 758 DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
759 plane_state);
759 760
760 return 0; 761 return 0;
761} 762}
@@ -782,10 +783,11 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
782 plane_state->fb = fb; 783 plane_state->fb = fb;
783 784
784 if (fb) 785 if (fb)
785 DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n", 786 DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
786 fb->base.id, plane_state); 787 fb->base.id, plane_state);
787 else 788 else
788 DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state); 789 DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
790 plane_state);
789} 791}
790EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); 792EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
791 793
@@ -818,11 +820,11 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
818 conn_state->crtc = crtc; 820 conn_state->crtc = crtc;
819 821
820 if (crtc) 822 if (crtc)
821 DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n", 823 DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
822 conn_state, crtc->base.id); 824 conn_state, crtc->base.id);
823 else 825 else
824 DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n", 826 DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
825 conn_state); 827 conn_state);
826 828
827 return 0; 829 return 0;
828} 830}
@@ -858,8 +860,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
858 if (ret) 860 if (ret)
859 return ret; 861 return ret;
860 862
861 DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n", 863 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
862 crtc->base.id, state); 864 crtc->base.id, state);
863 865
864 /* 866 /*
865 * Changed connectors are already in @state, so only need to look at the 867 * Changed connectors are already in @state, so only need to look at the
@@ -901,8 +903,8 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
901 num_connected_connectors++; 903 num_connected_connectors++;
902 } 904 }
903 905
904 DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n", 906 DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
905 state, num_connected_connectors, crtc->base.id); 907 state, num_connected_connectors, crtc->base.id);
906 908
907 return num_connected_connectors; 909 return num_connected_connectors;
908} 910}
@@ -953,7 +955,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
953 int ncrtcs = config->num_crtc; 955 int ncrtcs = config->num_crtc;
954 int i, ret = 0; 956 int i, ret = 0;
955 957
956 DRM_DEBUG_KMS("checking %p\n", state); 958 DRM_DEBUG_ATOMIC("checking %p\n", state);
957 959
958 for (i = 0; i < nplanes; i++) { 960 for (i = 0; i < nplanes; i++) {
959 struct drm_plane *plane = state->planes[i]; 961 struct drm_plane *plane = state->planes[i];
@@ -963,8 +965,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
963 965
964 ret = drm_atomic_plane_check(plane, state->plane_states[i]); 966 ret = drm_atomic_plane_check(plane, state->plane_states[i]);
965 if (ret) { 967 if (ret) {
966 DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n", 968 DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
967 plane->base.id); 969 plane->base.id);
968 return ret; 970 return ret;
969 } 971 }
970 } 972 }
@@ -977,8 +979,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
977 979
978 ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]); 980 ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]);
979 if (ret) { 981 if (ret) {
980 DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n", 982 DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
981 crtc->base.id); 983 crtc->base.id);
982 return ret; 984 return ret;
983 } 985 }
984 } 986 }
@@ -996,8 +998,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
996 998
997 if (crtc_state->mode_changed || 999 if (crtc_state->mode_changed ||
998 crtc_state->active_changed) { 1000 crtc_state->active_changed) {
999 DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n", 1001 DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
1000 crtc->base.id); 1002 crtc->base.id);
1001 return -EINVAL; 1003 return -EINVAL;
1002 } 1004 }
1003 } 1005 }
@@ -1032,7 +1034,7 @@ int drm_atomic_commit(struct drm_atomic_state *state)
1032 if (ret) 1034 if (ret)
1033 return ret; 1035 return ret;
1034 1036
1035 DRM_DEBUG_KMS("commiting %p\n", state); 1037 DRM_DEBUG_ATOMIC("commiting %p\n", state);
1036 1038
1037 return config->funcs->atomic_commit(state->dev, state, false); 1039 return config->funcs->atomic_commit(state->dev, state, false);
1038} 1040}
@@ -1063,7 +1065,7 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
1063 if (ret) 1065 if (ret)
1064 return ret; 1066 return ret;
1065 1067
1066 DRM_DEBUG_KMS("commiting %p asynchronously\n", state); 1068 DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
1067 1069
1068 return config->funcs->atomic_commit(state->dev, state, true); 1070 return config->funcs->atomic_commit(state->dev, state, true);
1069} 1071}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7e3a52b97c7d..7715c40d4e74 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -116,9 +116,9 @@ steal_encoder(struct drm_atomic_state *state,
116 */ 116 */
117 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 117 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
118 118
119 DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", 119 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
120 encoder->base.id, encoder->name, 120 encoder->base.id, encoder->name,
121 encoder_crtc->base.id); 121 encoder_crtc->base.id);
122 122
123 crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc); 123 crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
124 if (IS_ERR(crtc_state)) 124 if (IS_ERR(crtc_state))
@@ -130,9 +130,9 @@ steal_encoder(struct drm_atomic_state *state,
130 if (connector->state->best_encoder != encoder) 130 if (connector->state->best_encoder != encoder)
131 continue; 131 continue;
132 132
133 DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n", 133 DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
134 connector->base.id, 134 connector->base.id,
135 connector->name); 135 connector->name);
136 136
137 connector_state = drm_atomic_get_connector_state(state, 137 connector_state = drm_atomic_get_connector_state(state,
138 connector); 138 connector);
@@ -165,9 +165,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
165 if (!connector) 165 if (!connector)
166 return 0; 166 return 0;
167 167
168 DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n", 168 DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
169 connector->base.id, 169 connector->base.id,
170 connector->name); 170 connector->name);
171 171
172 if (connector->state->crtc != connector_state->crtc) { 172 if (connector->state->crtc != connector_state->crtc) {
173 if (connector->state->crtc) { 173 if (connector->state->crtc) {
@@ -186,7 +186,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
186 } 186 }
187 187
188 if (!connector_state->crtc) { 188 if (!connector_state->crtc) {
189 DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n", 189 DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
190 connector->base.id, 190 connector->base.id,
191 connector->name); 191 connector->name);
192 192
@@ -199,19 +199,19 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
199 new_encoder = funcs->best_encoder(connector); 199 new_encoder = funcs->best_encoder(connector);
200 200
201 if (!new_encoder) { 201 if (!new_encoder) {
202 DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n", 202 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
203 connector->base.id, 203 connector->base.id,
204 connector->name); 204 connector->name);
205 return -EINVAL; 205 return -EINVAL;
206 } 206 }
207 207
208 if (new_encoder == connector_state->best_encoder) { 208 if (new_encoder == connector_state->best_encoder) {
209 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", 209 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
210 connector->base.id, 210 connector->base.id,
211 connector->name, 211 connector->name,
212 new_encoder->base.id, 212 new_encoder->base.id,
213 new_encoder->name, 213 new_encoder->name,
214 connector_state->crtc->base.id); 214 connector_state->crtc->base.id);
215 215
216 return 0; 216 return 0;
217 } 217 }
@@ -222,9 +222,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
222 if (encoder_crtc) { 222 if (encoder_crtc) {
223 ret = steal_encoder(state, new_encoder, encoder_crtc); 223 ret = steal_encoder(state, new_encoder, encoder_crtc);
224 if (ret) { 224 if (ret) {
225 DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n", 225 DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
226 connector->base.id, 226 connector->base.id,
227 connector->name); 227 connector->name);
228 return ret; 228 return ret;
229 } 229 }
230 } 230 }
@@ -235,12 +235,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
235 crtc_state = state->crtc_states[idx]; 235 crtc_state = state->crtc_states[idx];
236 crtc_state->mode_changed = true; 236 crtc_state->mode_changed = true;
237 237
238 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", 238 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
239 connector->base.id, 239 connector->base.id,
240 connector->name, 240 connector->name,
241 new_encoder->base.id, 241 new_encoder->base.id,
242 new_encoder->name, 242 new_encoder->name,
243 connector_state->crtc->base.id); 243 connector_state->crtc->base.id);
244 244
245 return 0; 245 return 0;
246} 246}
@@ -292,7 +292,7 @@ mode_fixup(struct drm_atomic_state *state)
292 encoder->bridge, &crtc_state->mode, 292 encoder->bridge, &crtc_state->mode,
293 &crtc_state->adjusted_mode); 293 &crtc_state->adjusted_mode);
294 if (!ret) { 294 if (!ret) {
295 DRM_DEBUG_KMS("Bridge fixup failed\n"); 295 DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
296 return -EINVAL; 296 return -EINVAL;
297 } 297 }
298 } 298 }
@@ -301,16 +301,16 @@ mode_fixup(struct drm_atomic_state *state)
301 ret = funcs->atomic_check(encoder, crtc_state, 301 ret = funcs->atomic_check(encoder, crtc_state,
302 conn_state); 302 conn_state);
303 if (ret) { 303 if (ret) {
304 DRM_DEBUG_KMS("[ENCODER:%d:%s] check failed\n", 304 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
305 encoder->base.id, encoder->name); 305 encoder->base.id, encoder->name);
306 return ret; 306 return ret;
307 } 307 }
308 } else { 308 } else {
309 ret = funcs->mode_fixup(encoder, &crtc_state->mode, 309 ret = funcs->mode_fixup(encoder, &crtc_state->mode,
310 &crtc_state->adjusted_mode); 310 &crtc_state->adjusted_mode);
311 if (!ret) { 311 if (!ret) {
312 DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n", 312 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
313 encoder->base.id, encoder->name); 313 encoder->base.id, encoder->name);
314 return -EINVAL; 314 return -EINVAL;
315 } 315 }
316 } 316 }
@@ -330,8 +330,8 @@ mode_fixup(struct drm_atomic_state *state)
330 ret = funcs->mode_fixup(crtc, &crtc_state->mode, 330 ret = funcs->mode_fixup(crtc, &crtc_state->mode,
331 &crtc_state->adjusted_mode); 331 &crtc_state->adjusted_mode);
332 if (!ret) { 332 if (!ret) {
333 DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n", 333 DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
334 crtc->base.id); 334 crtc->base.id);
335 return -EINVAL; 335 return -EINVAL;
336 } 336 }
337 } 337 }
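
With the hunks above, encoders gain a first-class ->atomic_check() that sees the full CRTC and connector states, while ->mode_fixup() remains the fallback for unconverted drivers. A hedged sketch of an encoder helper using the new hook; the foo_* names and the clock limit are made up:

/* Sketch: an encoder ->atomic_check() rejecting modes the hardware
 * cannot drive. FOO_MAX_DOTCLOCK_KHZ is a hypothetical limit. */
static int foo_encoder_atomic_check(struct drm_encoder *encoder,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	if (crtc_state->adjusted_mode.clock > FOO_MAX_DOTCLOCK_KHZ)
		return -EINVAL;

	return 0;
}
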
@@ -384,14 +384,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
384 continue; 384 continue;
385 385
386 if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) { 386 if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
387 DRM_DEBUG_KMS("[CRTC:%d] mode changed\n", 387 DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
388 crtc->base.id); 388 crtc->base.id);
389 crtc_state->mode_changed = true; 389 crtc_state->mode_changed = true;
390 } 390 }
391 391
392 if (crtc->state->enable != crtc_state->enable) { 392 if (crtc->state->enable != crtc_state->enable) {
393 DRM_DEBUG_KMS("[CRTC:%d] enable changed\n", 393 DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
394 crtc->base.id); 394 crtc->base.id);
395 crtc_state->mode_changed = true; 395 crtc_state->mode_changed = true;
396 } 396 }
397 } 397 }
@@ -428,17 +428,17 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
428 * a full modeset because update_connector_routing forces that. 428
429 */ 429 */
430 if (crtc->state->active != crtc_state->active) { 430 if (crtc->state->active != crtc_state->active) {
431 DRM_DEBUG_KMS("[CRTC:%d] active changed\n", 431 DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
432 crtc->base.id); 432 crtc->base.id);
433 crtc_state->active_changed = true; 433 crtc_state->active_changed = true;
434 } 434 }
435 435
436 if (!needs_modeset(crtc_state)) 436 if (!needs_modeset(crtc_state))
437 continue; 437 continue;
438 438
439 DRM_DEBUG_KMS("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", 439 DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
440 crtc->base.id, 440 crtc->base.id,
441 crtc_state->enable ? 'y' : 'n', 441 crtc_state->enable ? 'y' : 'n',
442 crtc_state->active ? 'y' : 'n'); 442 crtc_state->active ? 'y' : 'n');
443 443
444 ret = drm_atomic_add_affected_connectors(state, crtc); 444 ret = drm_atomic_add_affected_connectors(state, crtc);
@@ -449,8 +449,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
449 crtc); 449 crtc);
450 450
451 if (crtc_state->enable != !!num_connectors) { 451 if (crtc_state->enable != !!num_connectors) {
452 DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n", 452 DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
453 crtc->base.id); 453 crtc->base.id);
454 454
455 return -EINVAL; 455 return -EINVAL;
456 } 456 }
@@ -497,8 +497,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
497 497
498 ret = funcs->atomic_check(plane, plane_state); 498 ret = funcs->atomic_check(plane, plane_state);
499 if (ret) { 499 if (ret) {
500 DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n", 500 DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
501 plane->base.id); 501 plane->base.id);
502 return ret; 502 return ret;
503 } 503 }
504 } 504 }
@@ -517,8 +517,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
517 517
518 ret = funcs->atomic_check(crtc, state->crtc_states[i]); 518 ret = funcs->atomic_check(crtc, state->crtc_states[i]);
519 if (ret) { 519 if (ret) {
520 DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n", 520 DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
521 crtc->base.id); 521 crtc->base.id);
522 return ret; 522 return ret;
523 } 523 }
524 } 524 }
@@ -600,8 +600,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
600 600
601 funcs = encoder->helper_private; 601 funcs = encoder->helper_private;
602 602
603 DRM_DEBUG_KMS("disabling [ENCODER:%d:%s]\n", 603 DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
604 encoder->base.id, encoder->name); 604 encoder->base.id, encoder->name);
605 605
606 /* 606 /*
607 * Each encoder has at most one connector (since we always steal 607 * Each encoder has at most one connector (since we always steal
@@ -639,8 +639,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
639 639
640 funcs = crtc->helper_private; 640 funcs = crtc->helper_private;
641 641
642 DRM_DEBUG_KMS("disabling [CRTC:%d]\n", 642 DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
643 crtc->base.id); 643 crtc->base.id);
644 644
645 645
646 /* Right function depends upon target state. */ 646 /* Right function depends upon target state. */
@@ -723,9 +723,9 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
723 723
724 funcs = crtc->helper_private; 724 funcs = crtc->helper_private;
725 725
726 if (crtc->state->enable) { 726 if (crtc->state->enable && funcs->mode_set_nofb) {
727 DRM_DEBUG_KMS("modeset on [CRTC:%d]\n", 727 DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
728 crtc->base.id); 728 crtc->base.id);
729 729
730 funcs->mode_set_nofb(crtc); 730 funcs->mode_set_nofb(crtc);
731 } 731 }
@@ -752,14 +752,15 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
752 if (!new_crtc_state->mode_changed) 752 if (!new_crtc_state->mode_changed)
753 continue; 753 continue;
754 754
755 DRM_DEBUG_KMS("modeset on [ENCODER:%d:%s]\n", 755 DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
756 encoder->base.id, encoder->name); 756 encoder->base.id, encoder->name);
757 757
758 /* 758 /*
759 * Each encoder has at most one connector (since we always steal 759 * Each encoder has at most one connector (since we always steal
760 * it away), so we won't call mode_set hooks twice. 760
761 */ 761 */
762 funcs->mode_set(encoder, mode, adjusted_mode); 762 if (funcs->mode_set)
763 funcs->mode_set(encoder, mode, adjusted_mode);
763 764
764 if (encoder->bridge && encoder->bridge->funcs->mode_set) 765 if (encoder->bridge && encoder->bridge->funcs->mode_set)
765 encoder->bridge->funcs->mode_set(encoder->bridge, 766 encoder->bridge->funcs->mode_set(encoder->bridge,
@@ -768,34 +769,44 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
768} 769}
769 770
770/** 771/**
771 * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates 772 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
772 * @dev: DRM device 773 * @dev: DRM device
773 * @state: atomic state 774 * @old_state: atomic state object with old state structures
774 * 775 *
775 * This function commits the modeset changes that need to be committed before 776 * This function shuts down all the outputs that need to be shut down and
776 * updating planes. It shuts down all the outputs that need to be shut down and
777 * prepares them (if required) with the new mode. 777 * prepares them (if required) with the new mode.
778 *
779 * For compatibility with legacy crtc helpers this should be called before
780 * drm_atomic_helper_commit_planes(), which is what the default commit function
781 * does. But drivers with different needs can group the modeset commits together
782 * and do the plane commits at the end. This is useful for drivers doing runtime
783 * PM since plane updates then only happen when the CRTC is actually enabled.
778 */ 784 */
779void drm_atomic_helper_commit_pre_planes(struct drm_device *dev, 785void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
780 struct drm_atomic_state *state) 786 struct drm_atomic_state *old_state)
781{ 787{
782 disable_outputs(dev, state); 788 disable_outputs(dev, old_state);
783 set_routing_links(dev, state); 789 set_routing_links(dev, old_state);
784 crtc_set_mode(dev, state); 790 crtc_set_mode(dev, old_state);
785} 791}
786EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes); 792EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
787 793
788/** 794/**
789 * drm_atomic_helper_commit_post_planes - modeset commit after plane updates 795 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
790 * @dev: DRM device 796 * @dev: DRM device
791 * @old_state: atomic state object with old state structures 797 * @old_state: atomic state object with old state structures
792 * 798 *
793 * This function commits the modeset changes that need to be committed after 799 * This function enables all the outputs with the new configuration which had to
794 * updating planes: It enables all the outputs with the new configuration which 800 * be turned off for the update.
795 * had to be turned off for the update. 801 *
802 * For compatibility with legacy crtc helpers this should be called after
803 * drm_atomic_helper_commit_planes(), which is what the default commit function
804 * does. But drivers with different needs can group the modeset commits together
805 * and do the plane commits at the end. This is useful for drivers doing runtime
806 * PM since plane updates then only happen when the CRTC is actually enabled.
796 */ 807 */
797void drm_atomic_helper_commit_post_planes(struct drm_device *dev, 808void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
798 struct drm_atomic_state *old_state) 809 struct drm_atomic_state *old_state)
799{ 810{
800 int ncrtcs = old_state->dev->mode_config.num_crtc; 811 int ncrtcs = old_state->dev->mode_config.num_crtc;
801 int i; 812 int i;
@@ -816,8 +827,8 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
816 funcs = crtc->helper_private; 827 funcs = crtc->helper_private;
817 828
818 if (crtc->state->enable) { 829 if (crtc->state->enable) {
819 DRM_DEBUG_KMS("enabling [CRTC:%d]\n", 830 DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
820 crtc->base.id); 831 crtc->base.id);
821 832
822 if (funcs->enable) 833 if (funcs->enable)
823 funcs->enable(crtc); 834 funcs->enable(crtc);
@@ -842,8 +853,8 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
842 encoder = connector->state->best_encoder; 853 encoder = connector->state->best_encoder;
843 funcs = encoder->helper_private; 854 funcs = encoder->helper_private;
844 855
845 DRM_DEBUG_KMS("enabling [ENCODER:%d:%s]\n", 856 DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
846 encoder->base.id, encoder->name); 857 encoder->base.id, encoder->name);
847 858
848 /* 859 /*
849 * Each encoder has at most one connector (since we always steal 860 * Each encoder has at most one connector (since we always steal
@@ -861,7 +872,7 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
861 encoder->bridge->funcs->enable(encoder->bridge); 872 encoder->bridge->funcs->enable(encoder->bridge);
862 } 873 }
863} 874}
864EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes); 875EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
865 876
866static void wait_for_fences(struct drm_device *dev, 877static void wait_for_fences(struct drm_device *dev,
867 struct drm_atomic_state *state) 878 struct drm_atomic_state *state)
@@ -1030,11 +1041,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1030 1041
1031 wait_for_fences(dev, state); 1042 wait_for_fences(dev, state);
1032 1043
1033 drm_atomic_helper_commit_pre_planes(dev, state); 1044 drm_atomic_helper_commit_modeset_disables(dev, state);
1034 1045
1035 drm_atomic_helper_commit_planes(dev, state); 1046 drm_atomic_helper_commit_planes(dev, state);
1036 1047
1037 drm_atomic_helper_commit_post_planes(dev, state); 1048 drm_atomic_helper_commit_modeset_enables(dev, state);
1038 1049
1039 drm_atomic_helper_wait_for_vblanks(dev, state); 1050 drm_atomic_helper_wait_for_vblanks(dev, state);
1040 1051
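
The default commit keeps the legacy-compatible order shown above: modeset disables, then plane updates, then modeset enables. As the reworked kernel-doc notes, a driver doing runtime PM can instead group both modeset phases together and commit planes last, so plane hooks only ever run on powered-up CRTCs. A sketch of that alternative tail (prepare/swap/cleanup steps unchanged and elided):

/* Sketch: alternative ordering for runtime-PM drivers, replacing the
 * three helper calls in drm_atomic_helper_commit() above. */
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_commit_planes(dev, state);
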
@@ -1105,6 +1116,7 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1105 for (i = 0; i < nplanes; i++) { 1116 for (i = 0; i < nplanes; i++) {
1106 struct drm_plane_helper_funcs *funcs; 1117 struct drm_plane_helper_funcs *funcs;
1107 struct drm_plane *plane = state->planes[i]; 1118 struct drm_plane *plane = state->planes[i];
1119 struct drm_plane_state *plane_state = state->plane_states[i];
1108 struct drm_framebuffer *fb; 1120 struct drm_framebuffer *fb;
1109 1121
1110 if (!plane) 1122 if (!plane)
@@ -1112,10 +1124,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1112 1124
1113 funcs = plane->helper_private; 1125 funcs = plane->helper_private;
1114 1126
1115 fb = state->plane_states[i]->fb; 1127 fb = plane_state->fb;
1116 1128
1117 if (fb && funcs->prepare_fb) { 1129 if (fb && funcs->prepare_fb) {
1118 ret = funcs->prepare_fb(plane, fb); 1130 ret = funcs->prepare_fb(plane, fb, plane_state);
1119 if (ret) 1131 if (ret)
1120 goto fail; 1132 goto fail;
1121 } 1133 }
@@ -1127,6 +1139,7 @@ fail:
1127 for (i--; i >= 0; i--) { 1139 for (i--; i >= 0; i--) {
1128 struct drm_plane_helper_funcs *funcs; 1140 struct drm_plane_helper_funcs *funcs;
1129 struct drm_plane *plane = state->planes[i]; 1141 struct drm_plane *plane = state->planes[i];
1142 struct drm_plane_state *plane_state = state->plane_states[i];
1130 struct drm_framebuffer *fb; 1143 struct drm_framebuffer *fb;
1131 1144
1132 if (!plane) 1145 if (!plane)
@@ -1137,7 +1150,7 @@ fail:
1137 fb = state->plane_states[i]->fb; 1150 fb = state->plane_states[i]->fb;
1138 1151
1139 if (fb && funcs->cleanup_fb) 1152 if (fb && funcs->cleanup_fb)
1140 funcs->cleanup_fb(plane, fb); 1153 funcs->cleanup_fb(plane, fb, plane_state);
1141 1154
1142 } 1155 }
1143 1156
@@ -1243,6 +1256,7 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
1243 for (i = 0; i < nplanes; i++) { 1256 for (i = 0; i < nplanes; i++) {
1244 struct drm_plane_helper_funcs *funcs; 1257 struct drm_plane_helper_funcs *funcs;
1245 struct drm_plane *plane = old_state->planes[i]; 1258 struct drm_plane *plane = old_state->planes[i];
1259 struct drm_plane_state *plane_state = old_state->plane_states[i];
1246 struct drm_framebuffer *old_fb; 1260 struct drm_framebuffer *old_fb;
1247 1261
1248 if (!plane) 1262 if (!plane)
@@ -1250,10 +1264,10 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
1250 1264
1251 funcs = plane->helper_private; 1265 funcs = plane->helper_private;
1252 1266
1253 old_fb = old_state->plane_states[i]->fb; 1267 old_fb = plane_state->fb;
1254 1268
1255 if (old_fb && funcs->cleanup_fb) 1269 if (old_fb && funcs->cleanup_fb)
1256 funcs->cleanup_fb(plane, old_fb); 1270 funcs->cleanup_fb(plane, old_fb, plane_state);
1257 } 1271 }
1258} 1272}
1259EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); 1273EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
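
prepare_fb()/cleanup_fb() now receive the plane state alongside the framebuffer, so drivers can make per-commit decisions (pinning, cache flushes, rotation-aware mapping) instead of seeing only the bare fb. A sketch against the new signature; foo_pin_fb() is a hypothetical driver helper:

/* Sketch: a ->prepare_fb() implementation using the added state
 * argument. foo_pin_fb() is hypothetical. */
static int foo_plane_prepare_fb(struct drm_plane *plane,
				struct drm_framebuffer *fb,
				const struct drm_plane_state *new_state)
{
	/* per-commit information is now available while pinning */
	return foo_pin_fb(fb, new_state->rotation);
}
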
@@ -1678,12 +1692,13 @@ backoff:
1678EXPORT_SYMBOL(drm_atomic_helper_set_config); 1692EXPORT_SYMBOL(drm_atomic_helper_set_config);
1679 1693
1680/** 1694/**
1681 * drm_atomic_helper_crtc_set_property - helper for crtc prorties 1695 * drm_atomic_helper_crtc_set_property - helper for crtc properties
1682 * @crtc: DRM crtc 1696 * @crtc: DRM crtc
1683 * @property: DRM property 1697 * @property: DRM property
1684 * @val: value of property 1698 * @val: value of property
1685 * 1699 *
1686 * Provides a default plane disablle handler using the atomic driver interface. 1700 * Provides a default crtc set_property handler using the atomic driver
1701 * interface.
1687 * 1702 *
1688 * RETURNS: 1703 * RETURNS:
1689 * Zero on success, error code on failure 1704 * Zero on success, error code on failure
@@ -1737,12 +1752,13 @@ backoff:
1737EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property); 1752EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
1738 1753
1739/** 1754/**
1740 * drm_atomic_helper_plane_set_property - helper for plane prorties 1755 * drm_atomic_helper_plane_set_property - helper for plane properties
1741 * @plane: DRM plane 1756 * @plane: DRM plane
1742 * @property: DRM property 1757 * @property: DRM property
1743 * @val: value of property 1758 * @val: value of property
1744 * 1759 *
1745 * Provides a default plane disable handler using the atomic driver interface. 1760 * Provides a default plane set_property handler using the atomic driver
1761 * interface.
1746 * 1762 *
1747 * RETURNS: 1763 * RETURNS:
1748 * Zero on success, error code on failure 1764 * Zero on success, error code on failure
@@ -1796,12 +1812,13 @@ backoff:
1796EXPORT_SYMBOL(drm_atomic_helper_plane_set_property); 1812EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
1797 1813
1798/** 1814/**
1799 * drm_atomic_helper_connector_set_property - helper for connector prorties 1815 * drm_atomic_helper_connector_set_property - helper for connector properties
1800 * @connector: DRM connector 1816 * @connector: DRM connector
1801 * @property: DRM property 1817 * @property: DRM property
1802 * @val: value of property 1818 * @val: value of property
1803 * 1819 *
1804 * Provides a default plane disablle handler using the atomic driver interface. 1820 * Provides a default connector set_property handler using the atomic driver
1821 * interface.
1805 * 1822 *
1806 * RETURNS: 1823 * RETURNS:
1807 * Zero on success, error code on failure 1824 * Zero on success, error code on failure
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b6b07ff720b..927f3445ff38 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2009,21 +2009,32 @@ int drm_mode_getcrtc(struct drm_device *dev,
2009 return -ENOENT; 2009 return -ENOENT;
2010 2010
2011 drm_modeset_lock_crtc(crtc, crtc->primary); 2011 drm_modeset_lock_crtc(crtc, crtc->primary);
2012 crtc_resp->x = crtc->x;
2013 crtc_resp->y = crtc->y;
2014 crtc_resp->gamma_size = crtc->gamma_size; 2012 crtc_resp->gamma_size = crtc->gamma_size;
2015 if (crtc->primary->fb) 2013 if (crtc->primary->fb)
2016 crtc_resp->fb_id = crtc->primary->fb->base.id; 2014 crtc_resp->fb_id = crtc->primary->fb->base.id;
2017 else 2015 else
2018 crtc_resp->fb_id = 0; 2016 crtc_resp->fb_id = 0;
2019 2017
2020 if (crtc->enabled) { 2018 if (crtc->state) {
2021 2019 crtc_resp->x = crtc->primary->state->src_x >> 16;
2022 drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); 2020 crtc_resp->y = crtc->primary->state->src_y >> 16;
2023 crtc_resp->mode_valid = 1; 2021 if (crtc->state->enable) {
2022 drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
2023 crtc_resp->mode_valid = 1;
2024 2024
2025 } else {
2026 crtc_resp->mode_valid = 0;
2027 }
2025 } else { 2028 } else {
2026 crtc_resp->mode_valid = 0; 2029 crtc_resp->x = crtc->x;
2030 crtc_resp->y = crtc->y;
2031 if (crtc->enabled) {
2032 drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
2033 crtc_resp->mode_valid = 1;
2034
2035 } else {
2036 crtc_resp->mode_valid = 0;
2037 }
2027 } 2038 }
2028 drm_modeset_unlock_crtc(crtc); 2039 drm_modeset_unlock_crtc(crtc);
2029 2040
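
For atomic drivers the GETCRTC ioctl now reports x/y from the primary plane's src coordinates (16.16 fixed point, hence the >> 16) and the mode from crtc->state, falling back to the legacy fields otherwise. Nothing changes for userspace; a libdrm sketch of the readback:

/* Sketch (userspace, libdrm): values are now sourced from atomic
 * state on atomic drivers. Requires xf86drmMode.h. */
drmModeCrtcPtr c = drmModeGetCrtc(fd, crtc_id);
if (c) {
	printf("crtc %u: fb %u at %u,%u mode_valid=%d\n",
	       c->crtc_id, c->buffer_id, c->x, c->y, c->mode_valid);
	drmModeFreeCrtc(c);
}
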
@@ -3262,6 +3273,12 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
3262 DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); 3273 DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
3263 return -EINVAL; 3274 return -EINVAL;
3264 } 3275 }
3276
3277 if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
3278 DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
3279 r->modifier[i], i);
3280 return -EINVAL;
3281 }
3265 } 3282 }
3266 3283
3267 return 0; 3284 return 0;
@@ -3275,7 +3292,7 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3275 struct drm_framebuffer *fb; 3292 struct drm_framebuffer *fb;
3276 int ret; 3293 int ret;
3277 3294
3278 if (r->flags & ~DRM_MODE_FB_INTERLACED) { 3295 if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
3279 DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); 3296 DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
3280 return ERR_PTR(-EINVAL); 3297 return ERR_PTR(-EINVAL);
3281 } 3298 }
@@ -3291,6 +3308,12 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3291 return ERR_PTR(-EINVAL); 3308 return ERR_PTR(-EINVAL);
3292 } 3309 }
3293 3310
3311 if (r->flags & DRM_MODE_FB_MODIFIERS &&
3312 !dev->mode_config.allow_fb_modifiers) {
3313 DRM_DEBUG_KMS("driver does not support fb modifiers\n");
3314 return ERR_PTR(-EINVAL);
3315 }
3316
3294 ret = framebuffer_check(r); 3317 ret = framebuffer_check(r);
3295 if (ret) 3318 if (ret)
3296 return ERR_PTR(ret); 3319 return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index b1979e7bdc88..3053aab968f9 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -837,6 +837,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
837 for (i = 0; i < 4; i++) { 837 for (i = 0; i < 4; i++) {
838 fb->pitches[i] = mode_cmd->pitches[i]; 838 fb->pitches[i] = mode_cmd->pitches[i];
839 fb->offsets[i] = mode_cmd->offsets[i]; 839 fb->offsets[i] = mode_cmd->offsets[i];
840 fb->modifier[i] = mode_cmd->modifier[i];
840 } 841 }
841 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, 842 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
842 &fb->bits_per_pixel); 843 &fb->bits_per_pixel);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 3785d66721f2..a6d773a61c2d 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -321,6 +321,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
321 else 321 else
322 req->value = 64; 322 req->value = 64;
323 break; 323 break;
324 case DRM_CAP_ADDFB2_MODIFIERS:
325 req->value = dev->mode_config.allow_fb_modifiers;
326 break;
324 default: 327 default:
325 return -EINVAL; 328 return -EINVAL;
326 } 329 }
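
Userspace must opt in to framebuffer modifiers: query the new capability, then pass DRM_MODE_FB_MODIFIERS when calling ADDFB2 with per-plane modifier[] entries (which framebuffer_check() above otherwise rejects). A libdrm sketch; treating drmModeAddFB2WithModifiers() as the eventual wrapper for this ioctl is an assumption here:

/* Sketch (userspace, libdrm): opt into fb modifiers only when the
 * driver advertises the capability. */
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

uint64_t cap = 0;

if (drmGetCap(fd, DRM_CAP_ADDFB2_MODIFIERS, &cap) == 0 && cap)
	ret = drmModeAddFB2WithModifiers(fd, w, h, DRM_FORMAT_XRGB8888,
					 handles, pitches, offsets,
					 modifiers, &fb_id,
					 DRM_MODE_FB_MODIFIERS);
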
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 10574a0c3a55..c8a34476570a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -276,7 +276,6 @@ static void vblank_disable_fn(unsigned long arg)
276void drm_vblank_cleanup(struct drm_device *dev) 276void drm_vblank_cleanup(struct drm_device *dev)
277{ 277{
278 int crtc; 278 int crtc;
279 unsigned long irqflags;
280 279
281 /* Bail if the driver didn't call drm_vblank_init() */ 280 /* Bail if the driver didn't call drm_vblank_init() */
282 if (dev->num_crtcs == 0) 281 if (dev->num_crtcs == 0)
@@ -285,11 +284,10 @@ void drm_vblank_cleanup(struct drm_device *dev)
285 for (crtc = 0; crtc < dev->num_crtcs; crtc++) { 284 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
286 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 285 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
287 286
288 del_timer_sync(&vblank->disable_timer); 287 WARN_ON(vblank->enabled &&
288 drm_core_check_feature(dev, DRIVER_MODESET));
289 289
290 spin_lock_irqsave(&dev->vbl_lock, irqflags); 290 del_timer_sync(&vblank->disable_timer);
291 vblank_disable_and_save(dev, crtc);
292 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
293 } 291 }
294 292
295 kfree(dev->vblank); 293 kfree(dev->vblank);
@@ -475,17 +473,23 @@ int drm_irq_uninstall(struct drm_device *dev)
475 dev->irq_enabled = false; 473 dev->irq_enabled = false;
476 474
477 /* 475 /*
478 * Wake up any waiters so they don't hang. 476 * Wake up any waiters so they don't hang. This is just to paper over
477 * issues for UMS drivers which aren't in full control of their
478 * vblank/irq handling. KMS drivers must ensure that vblanks are all
479 * disabled when uninstalling the irq handler.
479 */ 480 */
480 if (dev->num_crtcs) { 481 if (dev->num_crtcs) {
481 spin_lock_irqsave(&dev->vbl_lock, irqflags); 482 spin_lock_irqsave(&dev->vbl_lock, irqflags);
482 for (i = 0; i < dev->num_crtcs; i++) { 483 for (i = 0; i < dev->num_crtcs; i++) {
483 struct drm_vblank_crtc *vblank = &dev->vblank[i]; 484 struct drm_vblank_crtc *vblank = &dev->vblank[i];
484 485
486 if (!vblank->enabled)
487 continue;
488
489 WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
490
491 vblank_disable_and_save(dev, i);
485 wake_up(&vblank->queue); 492 wake_up(&vblank->queue);
486 vblank->enabled = false;
487 vblank->last =
488 dev->driver->get_vblank_counter(dev, i);
489 } 493 }
490 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 494 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
491 } 495 }
@@ -1052,7 +1056,7 @@ EXPORT_SYMBOL(drm_vblank_get);
1052 * Acquire a reference count on vblank events to avoid having them disabled 1056 * Acquire a reference count on vblank events to avoid having them disabled
1053 * while in use. 1057 * while in use.
1054 * 1058 *
1055 * This is the native kms version of drm_vblank_off(). 1059 * This is the native kms version of drm_vblank_get().
1056 * 1060 *
1057 * Returns: 1061 * Returns:
1058 * Zero on success, nonzero on failure. 1062 * Zero on success, nonzero on failure.
@@ -1233,6 +1237,38 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
1233EXPORT_SYMBOL(drm_crtc_vblank_off); 1237EXPORT_SYMBOL(drm_crtc_vblank_off);
1234 1238
1235/** 1239/**
1240 * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
1241 * @crtc: CRTC in question
1242 *
1243 * Drivers can use this function to reset the vblank state to off at load time.
1244 * Drivers should use this together with the drm_crtc_vblank_off() and
1245 * drm_crtc_vblank_on() functions. The difference compared to
1246 * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
1247 * and hence doesn't need to call any driver hooks.
1248 */
1249void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc)
1250{
1251 struct drm_device *dev = drm_crtc->dev;
1252 unsigned long irqflags;
1253 int crtc = drm_crtc_index(drm_crtc);
1254 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1255
1256 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1257 /*
1258 * Prevent subsequent drm_vblank_get() from enabling the vblank
1259 * interrupt by bumping the refcount.
1260 */
1261 if (!vblank->inmodeset) {
1262 atomic_inc(&vblank->refcount);
1263 vblank->inmodeset = 1;
1264 }
1265 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1266
1267 WARN_ON(!list_empty(&dev->vblank_event_list));
1268}
1269EXPORT_SYMBOL(drm_crtc_vblank_reset);
1270
1271/**
1236 * drm_vblank_on - enable vblank events on a CRTC 1272 * drm_vblank_on - enable vblank events on a CRTC
1237 * @dev: DRM device 1273 * @dev: DRM device
1238 * @crtc: CRTC in question 1274 * @crtc: CRTC in question
@@ -1653,7 +1689,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1653 struct timeval tvblank; 1689 struct timeval tvblank;
1654 unsigned long irqflags; 1690 unsigned long irqflags;
1655 1691
1656 if (!dev->num_crtcs) 1692 if (WARN_ON_ONCE(!dev->num_crtcs))
1657 return false; 1693 return false;
1658 1694
1659 if (WARN_ON(crtc >= dev->num_crtcs)) 1695 if (WARN_ON(crtc >= dev->num_crtcs))
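
A minimal usage sketch for the new drm_crtc_vblank_reset() helper, following
the kerneldoc added above; the foo_* naming is hypothetical and only the
drm_crtc_vblank_* calls are taken from this patch:

	#include <drm/drmP.h>

	/* Once at driver load, before the first modeset: reset the vblank
	 * bookkeeping to "off" without saving the counter or invoking any
	 * driver hooks. */
	static void foo_crtc_init_vblank(struct drm_crtc *crtc)
	{
		drm_crtc_vblank_reset(crtc);
	}

	/* Later, pair drm_crtc_vblank_on() with the CRTC actually being
	 * enabled and drm_crtc_vblank_off() with it being disabled. */
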
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5ba5792bfdba..813a06627eb3 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -437,7 +437,8 @@ int drm_plane_helper_commit(struct drm_plane *plane,
437 437
438 if (plane_funcs->prepare_fb && plane_state->fb && 438 if (plane_funcs->prepare_fb && plane_state->fb &&
439 plane_state->fb != old_fb) { 439 plane_state->fb != old_fb) {
440 ret = plane_funcs->prepare_fb(plane, plane_state->fb); 440 ret = plane_funcs->prepare_fb(plane, plane_state->fb,
441 plane_state);
441 if (ret) 442 if (ret)
442 goto out; 443 goto out;
443 } 444 }
@@ -487,7 +488,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
487 } 488 }
488 489
489 if (plane_funcs->cleanup_fb && old_fb) 490 if (plane_funcs->cleanup_fb && old_fb)
490 plane_funcs->cleanup_fb(plane, old_fb); 491 plane_funcs->cleanup_fb(plane, old_fb, plane_state);
491out: 492out:
492 if (plane_state) { 493 if (plane_state) {
493 if (plane->funcs->atomic_destroy_state) 494 if (plane->funcs->atomic_destroy_state)
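
With prepare_fb/cleanup_fb now taking the plane state as a third argument, a
driver's helper callbacks would look roughly like the sketch below; the foo_*
names and the exact type of the state parameter are assumptions, since only
the call sites are visible in this hunk:

	static int foo_prepare_fb(struct drm_plane *plane,
				  struct drm_framebuffer *fb,
				  const struct drm_plane_state *new_state)
	{
		/* e.g. pin fb's backing storage before the commit */
		return 0;
	}

	static void foo_cleanup_fb(struct drm_plane *plane,
				   struct drm_framebuffer *fb,
				   const struct drm_plane_state *old_state)
	{
		/* e.g. unpin the old framebuffer after the commit */
	}

	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
		.prepare_fb = foo_prepare_fb,
		.cleanup_fb = foo_cleanup_fb,
	};
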
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a9041d1a8ff0..5febffdb027d 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -25,6 +25,7 @@
25#include <drm/drm_crtc_helper.h> 25#include <drm/drm_crtc_helper.h>
26#include <drm/drm_encoder_slave.h> 26#include <drm/drm_encoder_slave.h>
27#include <drm/drm_edid.h> 27#include <drm/drm_edid.h>
28#include <drm/drm_of.h>
28#include <drm/i2c/tda998x.h> 29#include <drm/i2c/tda998x.h>
29 30
30#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 31#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
@@ -387,7 +388,7 @@ set_page(struct tda998x_priv *priv, uint16_t reg)
387 }; 388 };
388 int ret = i2c_master_send(client, buf, sizeof(buf)); 389 int ret = i2c_master_send(client, buf, sizeof(buf));
389 if (ret < 0) { 390 if (ret < 0) {
390 dev_err(&client->dev, "setpage %04x err %d\n", 391 dev_err(&client->dev, "%s %04x err %d\n", __func__,
391 reg, ret); 392 reg, ret);
392 return ret; 393 return ret;
393 } 394 }
@@ -1035,8 +1036,9 @@ tda998x_encoder_detect(struct tda998x_priv *priv)
1035 connector_status_disconnected; 1036 connector_status_disconnected;
1036} 1037}
1037 1038
1038static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk) 1039static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
1039{ 1040{
1041 struct tda998x_priv *priv = data;
1040 uint8_t offset, segptr; 1042 uint8_t offset, segptr;
1041 int ret, i; 1043 int ret, i;
1042 1044
@@ -1080,8 +1082,8 @@ static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
1080 return -ETIMEDOUT; 1082 return -ETIMEDOUT;
1081 } 1083 }
1082 1084
1083 ret = reg_read_range(priv, REG_EDID_DATA_0, buf, EDID_LENGTH); 1085 ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
1084 if (ret != EDID_LENGTH) { 1086 if (ret != length) {
1085 dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n", 1087 dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
1086 blk, ret); 1088 blk, ret);
1087 return ret; 1089 return ret;
@@ -1090,82 +1092,31 @@ static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
1090 return 0; 1092 return 0;
1091} 1093}
1092 1094
1093static uint8_t *do_get_edid(struct tda998x_priv *priv) 1095static int
1096tda998x_encoder_get_modes(struct tda998x_priv *priv,
1097 struct drm_connector *connector)
1094{ 1098{
1095 int j, valid_extensions = 0; 1099 struct edid *edid;
1096 uint8_t *block, *new; 1100 int n;
1097 bool print_bad_edid = drm_debug & DRM_UT_KMS;
1098
1099 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
1100 return NULL;
1101 1101
1102 if (priv->rev == TDA19988) 1102 if (priv->rev == TDA19988)
1103 reg_clear(priv, REG_TX4, TX4_PD_RAM); 1103 reg_clear(priv, REG_TX4, TX4_PD_RAM);
1104 1104
1105 /* base block fetch */ 1105 edid = drm_do_get_edid(connector, read_edid_block, priv);
1106 if (read_edid_block(priv, block, 0))
1107 goto fail;
1108
1109 if (!drm_edid_block_valid(block, 0, print_bad_edid))
1110 goto fail;
1111
1112 /* if there's no extensions, we're done */
1113 if (block[0x7e] == 0)
1114 goto done;
1115
1116 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
1117 if (!new)
1118 goto fail;
1119 block = new;
1120
1121 for (j = 1; j <= block[0x7e]; j++) {
1122 uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
1123 if (read_edid_block(priv, ext_block, j))
1124 goto fail;
1125
1126 if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
1127 goto fail;
1128
1129 valid_extensions++;
1130 }
1131
1132 if (valid_extensions != block[0x7e]) {
1133 block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
1134 block[0x7e] = valid_extensions;
1135 new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
1136 if (!new)
1137 goto fail;
1138 block = new;
1139 }
1140 1106
1141done:
1142 if (priv->rev == TDA19988) 1107 if (priv->rev == TDA19988)
1143 reg_set(priv, REG_TX4, TX4_PD_RAM); 1108 reg_set(priv, REG_TX4, TX4_PD_RAM);
1144 1109
1145 return block; 1110 if (!edid) {
1146 1111 dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
1147fail: 1112 return 0;
1148 if (priv->rev == TDA19988)
1149 reg_set(priv, REG_TX4, TX4_PD_RAM);
1150 dev_warn(&priv->hdmi->dev, "failed to read EDID\n");
1151 kfree(block);
1152 return NULL;
1153}
1154
1155static int
1156tda998x_encoder_get_modes(struct tda998x_priv *priv,
1157 struct drm_connector *connector)
1158{
1159 struct edid *edid = (struct edid *)do_get_edid(priv);
1160 int n = 0;
1161
1162 if (edid) {
1163 drm_mode_connector_update_edid_property(connector, edid);
1164 n = drm_add_edid_modes(connector, edid);
1165 priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
1166 kfree(edid);
1167 } 1113 }
1168 1114
1115 drm_mode_connector_update_edid_property(connector, edid);
1116 n = drm_add_edid_modes(connector, edid);
1117 priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
1118 kfree(edid);
1119
1169 return n; 1120 return n;
1170} 1121}
1171 1122
@@ -1547,6 +1498,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
1547 struct i2c_client *client = to_i2c_client(dev); 1498 struct i2c_client *client = to_i2c_client(dev);
1548 struct drm_device *drm = data; 1499 struct drm_device *drm = data;
1549 struct tda998x_priv2 *priv; 1500 struct tda998x_priv2 *priv;
1501 uint32_t crtcs = 0;
1550 int ret; 1502 int ret;
1551 1503
1552 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 1504 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -1555,9 +1507,18 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
1555 1507
1556 dev_set_drvdata(dev, priv); 1508 dev_set_drvdata(dev, priv);
1557 1509
1510 if (dev->of_node)
1511 crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
1512
1513 /* If no CRTCs were found, fall back to our old behaviour */
1514 if (crtcs == 0) {
1515 dev_warn(dev, "Falling back to first CRTC\n");
1516 crtcs = 1 << 0;
1517 }
1518
1558 priv->base.encoder = &priv->encoder; 1519 priv->base.encoder = &priv->encoder;
1559 priv->connector.interlace_allowed = 1; 1520 priv->connector.interlace_allowed = 1;
1560 priv->encoder.possible_crtcs = 1 << 0; 1521 priv->encoder.possible_crtcs = crtcs;
1561 1522
1562 ret = tda998x_create(client, &priv->base); 1523 ret = tda998x_create(client, &priv->base);
1563 if (ret) 1524 if (ret)
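
The EDID rework above delegates block fetching, validation and extension
handling to drm_do_get_edid(), whose callback contract matches the new
read_edid_block() signature; a sketch with hypothetical my_* names:

	/* Copy `length` bytes of EDID block `blk` into `buf`; return 0 on
	 * success or a negative errno. The helper validates each block and
	 * walks the extension blocks itself. */
	static int my_read_edid_block(void *data, u8 *buf, unsigned int blk,
				      size_t length)
	{
		return 0;
	}

	static struct edid *my_get_edid(struct drm_connector *connector,
					void *priv)
	{
		return drm_do_get_edid(connector, my_read_edid_block, priv);
	}
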
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f01922591679..d3ebaf204408 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -83,9 +83,11 @@ i915-y += dvo_ch7017.o \
83 intel_sdvo.o \ 83 intel_sdvo.o \
84 intel_tv.o 84 intel_tv.o
85 85
86# virtual gpu code
87i915-y += i915_vgpu.o
88
86# legacy horrors 89# legacy horrors
87i915-y += i915_dma.o \ 90i915-y += i915_dma.o
88 i915_ums.o
89 91
90obj-$(CONFIG_DRM_I915) += i915.o 92obj-$(CONFIG_DRM_I915) += i915.o
91 93
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 806e812340d0..9a6da3536ae5 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -818,24 +818,26 @@ static bool valid_reg(const u32 *table, int count, u32 addr)
818 return false; 818 return false;
819} 819}
820 820
821static u32 *vmap_batch(struct drm_i915_gem_object *obj) 821static u32 *vmap_batch(struct drm_i915_gem_object *obj,
822 unsigned start, unsigned len)
822{ 823{
823 int i; 824 int i;
824 void *addr = NULL; 825 void *addr = NULL;
825 struct sg_page_iter sg_iter; 826 struct sg_page_iter sg_iter;
827 int first_page = start >> PAGE_SHIFT;
828 int last_page = (len + start + 4095) >> PAGE_SHIFT;
829 int npages = last_page - first_page;
826 struct page **pages; 830 struct page **pages;
827 831
828 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); 832 pages = drm_malloc_ab(npages, sizeof(*pages));
829 if (pages == NULL) { 833 if (pages == NULL) {
830 DRM_DEBUG_DRIVER("Failed to get space for pages\n"); 834 DRM_DEBUG_DRIVER("Failed to get space for pages\n");
831 goto finish; 835 goto finish;
832 } 836 }
833 837
834 i = 0; 838 i = 0;
835 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 839 for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first_page)
836 pages[i] = sg_page_iter_page(&sg_iter); 840 pages[i++] = sg_page_iter_page(&sg_iter);
837 i++;
838 }
839 841
840 addr = vmap(pages, i, 0, PAGE_KERNEL); 842 addr = vmap(pages, i, 0, PAGE_KERNEL);
841 if (addr == NULL) { 843 if (addr == NULL) {
@@ -855,61 +857,61 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
855 u32 batch_start_offset, 857 u32 batch_start_offset,
856 u32 batch_len) 858 u32 batch_len)
857{ 859{
858 int ret = 0;
859 int needs_clflush = 0; 860 int needs_clflush = 0;
860 u32 *src_base, *dest_base = NULL; 861 void *src_base, *src;
861 u32 *src_addr, *dest_addr; 862 void *dst = NULL;
862 u32 offset = batch_start_offset / sizeof(*dest_addr); 863 int ret;
863 u32 end = batch_start_offset + batch_len;
864 864
865 if (end > dest_obj->base.size || end > src_obj->base.size) 865 if (batch_len > dest_obj->base.size ||
866 batch_len + batch_start_offset > src_obj->base.size)
866 return ERR_PTR(-E2BIG); 867 return ERR_PTR(-E2BIG);
867 868
868 ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush); 869 ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
869 if (ret) { 870 if (ret) {
870 DRM_DEBUG_DRIVER("CMD: failed to prep read\n"); 871 DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
871 return ERR_PTR(ret); 872 return ERR_PTR(ret);
872 } 873 }
873 874
874 src_base = vmap_batch(src_obj); 875 src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
875 if (!src_base) { 876 if (!src_base) {
876 DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n"); 877 DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
877 ret = -ENOMEM; 878 ret = -ENOMEM;
878 goto unpin_src; 879 goto unpin_src;
879 } 880 }
880 881
881 src_addr = src_base + offset; 882 ret = i915_gem_object_get_pages(dest_obj);
882 883 if (ret) {
883 if (needs_clflush) 884 DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
884 drm_clflush_virt_range((char *)src_addr, batch_len); 885 goto unmap_src;
886 }
887 i915_gem_object_pin_pages(dest_obj);
885 888
886 ret = i915_gem_object_set_to_cpu_domain(dest_obj, true); 889 ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
887 if (ret) { 890 if (ret) {
888 DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n"); 891 DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
889 goto unmap_src; 892 goto unmap_src;
890 } 893 }
891 894
892 dest_base = vmap_batch(dest_obj); 895 dst = vmap_batch(dest_obj, 0, batch_len);
893 if (!dest_base) { 896 if (!dst) {
894 DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n"); 897 DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
898 i915_gem_object_unpin_pages(dest_obj);
895 ret = -ENOMEM; 899 ret = -ENOMEM;
896 goto unmap_src; 900 goto unmap_src;
897 } 901 }
898 902
899 dest_addr = dest_base + offset; 903 src = src_base + offset_in_page(batch_start_offset);
900 904 if (needs_clflush)
901 if (batch_start_offset != 0) 905 drm_clflush_virt_range(src, batch_len);
902 memset((u8 *)dest_base, 0, batch_start_offset);
903 906
904 memcpy(dest_addr, src_addr, batch_len); 907 memcpy(dst, src, batch_len);
905 memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
906 908
907unmap_src: 909unmap_src:
908 vunmap(src_base); 910 vunmap(src_base);
909unpin_src: 911unpin_src:
910 i915_gem_object_unpin_pages(src_obj); 912 i915_gem_object_unpin_pages(src_obj);
911 913
912 return ret ? ERR_PTR(ret) : dest_base; 914 return ret ? ERR_PTR(ret) : dst;
913} 915}
914 916
915/** 917/**
@@ -1046,34 +1048,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
1046 u32 batch_len, 1048 u32 batch_len,
1047 bool is_master) 1049 bool is_master)
1048{ 1050{
1049 int ret = 0;
1050 u32 *cmd, *batch_base, *batch_end; 1051 u32 *cmd, *batch_base, *batch_end;
1051 struct drm_i915_cmd_descriptor default_desc = { 0 }; 1052 struct drm_i915_cmd_descriptor default_desc = { 0 };
1052 bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */ 1053 bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
1053 1054 int ret = 0;
1054 ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
1055 if (ret) {
1056 DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
1057 return -1;
1058 }
1059 1055
1060 batch_base = copy_batch(shadow_batch_obj, batch_obj, 1056 batch_base = copy_batch(shadow_batch_obj, batch_obj,
1061 batch_start_offset, batch_len); 1057 batch_start_offset, batch_len);
1062 if (IS_ERR(batch_base)) { 1058 if (IS_ERR(batch_base)) {
1063 DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n"); 1059 DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
1064 i915_gem_object_ggtt_unpin(shadow_batch_obj);
1065 return PTR_ERR(batch_base); 1060 return PTR_ERR(batch_base);
1066 } 1061 }
1067 1062
1068 cmd = batch_base + (batch_start_offset / sizeof(*cmd));
1069
1070 /* 1063 /*
1071 * We use the batch length as size because the shadow object is as 1064 * We use the batch length as size because the shadow object is as
1072 * large or larger and copy_batch() will write MI_NOPs to the extra 1065 * large or larger and copy_batch() will write MI_NOPs to the extra
1073 * space. Parsing should be faster in some cases this way. 1066 * space. Parsing should be faster in some cases this way.
1074 */ 1067 */
1075 batch_end = cmd + (batch_len / sizeof(*batch_end)); 1068 batch_end = batch_base + (batch_len / sizeof(*batch_end));
1076 1069
1070 cmd = batch_base;
1077 while (cmd < batch_end) { 1071 while (cmd < batch_end) {
1078 const struct drm_i915_cmd_descriptor *desc; 1072 const struct drm_i915_cmd_descriptor *desc;
1079 u32 length; 1073 u32 length;
@@ -1132,7 +1126,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
1132 } 1126 }
1133 1127
1134 vunmap(batch_base); 1128 vunmap(batch_base);
1135 i915_gem_object_ggtt_unpin(shadow_batch_obj); 1129 i915_gem_object_unpin_pages(shadow_batch_obj);
1136 1130
1137 return ret; 1131 return ret;
1138} 1132}
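
The reworked vmap_batch() maps only the pages covering [start, start + len)
instead of the whole object; the page-window arithmetic, with a worked
example assuming PAGE_SIZE = 4096:

	first_page = start >> PAGE_SHIFT;                /* page of the first byte  */
	last_page  = (start + len + 4095) >> PAGE_SHIFT; /* page past the last byte */
	npages     = last_page - first_page;

	/* e.g. start = 0x1800, len = 0x2000:
	 *   first_page = 1, last_page = 4, npages = 3
	 * and the batch then begins at src_base + offset_in_page(start),
	 * i.e. 0x800 into the mapping. */
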
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e8b18e542da4..e38f45374d55 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -139,10 +139,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
139 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 139 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
140 if (obj->base.name) 140 if (obj->base.name)
141 seq_printf(m, " (name: %d)", obj->base.name); 141 seq_printf(m, " (name: %d)", obj->base.name);
142 list_for_each_entry(vma, &obj->vma_list, vma_link) 142 list_for_each_entry(vma, &obj->vma_list, vma_link) {
143 if (vma->pin_count > 0) 143 if (vma->pin_count > 0)
144 pin_count++; 144 pin_count++;
145 seq_printf(m, " (pinned x %d)", pin_count); 145 }
146 seq_printf(m, " (pinned x %d)", pin_count);
146 if (obj->pin_display) 147 if (obj->pin_display)
147 seq_printf(m, " (display)"); 148 seq_printf(m, " (display)");
148 if (obj->fence_reg != I915_FENCE_REG_NONE) 149 if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -580,7 +581,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
580 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 581 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
581 work->flip_queued_vblank, 582 work->flip_queued_vblank,
582 work->flip_ready_vblank, 583 work->flip_ready_vblank,
583 drm_vblank_count(dev, crtc->pipe)); 584 drm_crtc_vblank_count(&crtc->base));
584 if (work->enable_stall_check) 585 if (work->enable_stall_check)
585 seq_puts(m, "Stall check enabled, "); 586 seq_puts(m, "Stall check enabled, ");
586 else 587 else
@@ -1778,11 +1779,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1778 ifbdev = dev_priv->fbdev; 1779 ifbdev = dev_priv->fbdev;
1779 fb = to_intel_framebuffer(ifbdev->helper.fb); 1780 fb = to_intel_framebuffer(ifbdev->helper.fb);
1780 1781
1781 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1782 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1782 fb->base.width, 1783 fb->base.width,
1783 fb->base.height, 1784 fb->base.height,
1784 fb->base.depth, 1785 fb->base.depth,
1785 fb->base.bits_per_pixel, 1786 fb->base.bits_per_pixel,
1787 fb->base.modifier[0],
1786 atomic_read(&fb->base.refcount.refcount)); 1788 atomic_read(&fb->base.refcount.refcount));
1787 describe_obj(m, fb->obj); 1789 describe_obj(m, fb->obj);
1788 seq_putc(m, '\n'); 1790 seq_putc(m, '\n');
@@ -1793,11 +1795,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1793 if (ifbdev && &fb->base == ifbdev->helper.fb) 1795 if (ifbdev && &fb->base == ifbdev->helper.fb)
1794 continue; 1796 continue;
1795 1797
1796 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1798 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1797 fb->base.width, 1799 fb->base.width,
1798 fb->base.height, 1800 fb->base.height,
1799 fb->base.depth, 1801 fb->base.depth,
1800 fb->base.bits_per_pixel, 1802 fb->base.bits_per_pixel,
1803 fb->base.modifier[0],
1801 atomic_read(&fb->base.refcount.refcount)); 1804 atomic_read(&fb->base.refcount.refcount));
1802 describe_obj(m, fb->obj); 1805 describe_obj(m, fb->obj);
1803 seq_putc(m, '\n'); 1806 seq_putc(m, '\n');
@@ -2183,7 +2186,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2183 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2186 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2184 2187
2185 seq_puts(m, "aliasing PPGTT:\n"); 2188 seq_puts(m, "aliasing PPGTT:\n");
2186 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 2189 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
2187 2190
2188 ppgtt->debug_dump(ppgtt, m); 2191 ppgtt->debug_dump(ppgtt, m);
2189 } 2192 }
@@ -4189,7 +4192,7 @@ i915_max_freq_set(void *data, u64 val)
4189{ 4192{
4190 struct drm_device *dev = data; 4193 struct drm_device *dev = data;
4191 struct drm_i915_private *dev_priv = dev->dev_private; 4194 struct drm_i915_private *dev_priv = dev->dev_private;
4192 u32 rp_state_cap, hw_max, hw_min; 4195 u32 hw_max, hw_min;
4193 int ret; 4196 int ret;
4194 4197
4195 if (INTEL_INFO(dev)->gen < 6) 4198 if (INTEL_INFO(dev)->gen < 6)
@@ -4206,18 +4209,10 @@ i915_max_freq_set(void *data, u64 val)
4206 /* 4209 /*
4207 * Turbo will still be enabled, but won't go above the set value. 4210 * Turbo will still be enabled, but won't go above the set value.
4208 */ 4211 */
4209 if (IS_VALLEYVIEW(dev)) { 4212 val = intel_freq_opcode(dev_priv, val);
4210 val = intel_freq_opcode(dev_priv, val);
4211
4212 hw_max = dev_priv->rps.max_freq;
4213 hw_min = dev_priv->rps.min_freq;
4214 } else {
4215 val = intel_freq_opcode(dev_priv, val);
4216 4213
4217 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 4214 hw_max = dev_priv->rps.max_freq;
4218 hw_max = dev_priv->rps.max_freq; 4215 hw_min = dev_priv->rps.min_freq;
4219 hw_min = (rp_state_cap >> 16) & 0xff;
4220 }
4221 4216
4222 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 4217 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4223 mutex_unlock(&dev_priv->rps.hw_lock); 4218 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4226,10 +4221,7 @@ i915_max_freq_set(void *data, u64 val)
4226 4221
4227 dev_priv->rps.max_freq_softlimit = val; 4222 dev_priv->rps.max_freq_softlimit = val;
4228 4223
4229 if (IS_VALLEYVIEW(dev)) 4224 intel_set_rps(dev, val);
4230 valleyview_set_rps(dev, val);
4231 else
4232 gen6_set_rps(dev, val);
4233 4225
4234 mutex_unlock(&dev_priv->rps.hw_lock); 4226 mutex_unlock(&dev_priv->rps.hw_lock);
4235 4227
@@ -4267,7 +4259,7 @@ i915_min_freq_set(void *data, u64 val)
4267{ 4259{
4268 struct drm_device *dev = data; 4260 struct drm_device *dev = data;
4269 struct drm_i915_private *dev_priv = dev->dev_private; 4261 struct drm_i915_private *dev_priv = dev->dev_private;
4270 u32 rp_state_cap, hw_max, hw_min; 4262 u32 hw_max, hw_min;
4271 int ret; 4263 int ret;
4272 4264
4273 if (INTEL_INFO(dev)->gen < 6) 4265 if (INTEL_INFO(dev)->gen < 6)
@@ -4284,18 +4276,10 @@ i915_min_freq_set(void *data, u64 val)
4284 /* 4276 /*
4285 * Turbo will still be enabled, but won't go below the set value. 4277 * Turbo will still be enabled, but won't go below the set value.
4286 */ 4278 */
4287 if (IS_VALLEYVIEW(dev)) { 4279 val = intel_freq_opcode(dev_priv, val);
4288 val = intel_freq_opcode(dev_priv, val);
4289 4280
4290 hw_max = dev_priv->rps.max_freq; 4281 hw_max = dev_priv->rps.max_freq;
4291 hw_min = dev_priv->rps.min_freq; 4282 hw_min = dev_priv->rps.min_freq;
4292 } else {
4293 val = intel_freq_opcode(dev_priv, val);
4294
4295 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4296 hw_max = dev_priv->rps.max_freq;
4297 hw_min = (rp_state_cap >> 16) & 0xff;
4298 }
4299 4283
4300 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 4284 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
4301 mutex_unlock(&dev_priv->rps.hw_lock); 4285 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4304,10 +4288,7 @@ i915_min_freq_set(void *data, u64 val)
4304 4288
4305 dev_priv->rps.min_freq_softlimit = val; 4289 dev_priv->rps.min_freq_softlimit = val;
4306 4290
4307 if (IS_VALLEYVIEW(dev)) 4291 intel_set_rps(dev, val);
4308 valleyview_set_rps(dev, val);
4309 else
4310 gen6_set_rps(dev, val);
4311 4292
4312 mutex_unlock(&dev_priv->rps.hw_lock); 4293 mutex_unlock(&dev_priv->rps.hw_lock);
4313 4294
@@ -4374,6 +4355,85 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4374 i915_cache_sharing_get, i915_cache_sharing_set, 4355 i915_cache_sharing_get, i915_cache_sharing_set,
4375 "%llu\n"); 4356 "%llu\n");
4376 4357
4358static int i915_sseu_status(struct seq_file *m, void *unused)
4359{
4360 struct drm_info_node *node = (struct drm_info_node *) m->private;
4361 struct drm_device *dev = node->minor->dev;
4362 struct drm_i915_private *dev_priv = dev->dev_private;
4363 unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
4364
4365 if (INTEL_INFO(dev)->gen < 9)
4366 return -ENODEV;
4367
4368 seq_puts(m, "SSEU Device Info\n");
4369 seq_printf(m, " Available Slice Total: %u\n",
4370 INTEL_INFO(dev)->slice_total);
4371 seq_printf(m, " Available Subslice Total: %u\n",
4372 INTEL_INFO(dev)->subslice_total);
4373 seq_printf(m, " Available Subslice Per Slice: %u\n",
4374 INTEL_INFO(dev)->subslice_per_slice);
4375 seq_printf(m, " Available EU Total: %u\n",
4376 INTEL_INFO(dev)->eu_total);
4377 seq_printf(m, " Available EU Per Subslice: %u\n",
4378 INTEL_INFO(dev)->eu_per_subslice);
4379 seq_printf(m, " Has Slice Power Gating: %s\n",
4380 yesno(INTEL_INFO(dev)->has_slice_pg));
4381 seq_printf(m, " Has Subslice Power Gating: %s\n",
4382 yesno(INTEL_INFO(dev)->has_subslice_pg));
4383 seq_printf(m, " Has EU Power Gating: %s\n",
4384 yesno(INTEL_INFO(dev)->has_eu_pg));
4385
4386 seq_puts(m, "SSEU Device Status\n");
4387 if (IS_SKYLAKE(dev)) {
4388 const int s_max = 3, ss_max = 4;
4389 int s, ss;
4390 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4391
4392 s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
4393 s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
4394 s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
4395 eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
4396 eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
4397 eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
4398 eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
4399 eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
4400 eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
4401 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4402 GEN9_PGCTL_SSA_EU19_ACK |
4403 GEN9_PGCTL_SSA_EU210_ACK |
4404 GEN9_PGCTL_SSA_EU311_ACK;
4405 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4406 GEN9_PGCTL_SSB_EU19_ACK |
4407 GEN9_PGCTL_SSB_EU210_ACK |
4408 GEN9_PGCTL_SSB_EU311_ACK;
4409
4410 for (s = 0; s < s_max; s++) {
4411 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4412 /* skip disabled slice */
4413 continue;
4414
4415 s_tot++;
4416 ss_per = INTEL_INFO(dev)->subslice_per_slice;
4417 ss_tot += ss_per;
4418 for (ss = 0; ss < ss_max; ss++) {
4419 unsigned int eu_cnt;
4420
4421 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4422 eu_mask[ss%2]);
4423 eu_tot += eu_cnt;
4424 eu_per = max(eu_per, eu_cnt);
4425 }
4426 }
4427 }
4428 seq_printf(m, " Enabled Slice Total: %u\n", s_tot);
4429 seq_printf(m, " Enabled Subslice Total: %u\n", ss_tot);
4430 seq_printf(m, " Enabled Subslice Per Slice: %u\n", ss_per);
4431 seq_printf(m, " Enabled EU Total: %u\n", eu_tot);
4432 seq_printf(m, " Enabled EU Per Subslice: %u\n", eu_per);
4433
4434 return 0;
4435}
4436
4377static int i915_forcewake_open(struct inode *inode, struct file *file) 4437static int i915_forcewake_open(struct inode *inode, struct file *file)
4378{ 4438{
4379 struct drm_device *dev = inode->i_private; 4439 struct drm_device *dev = inode->i_private;
@@ -4487,6 +4547,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
4487 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4547 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4488 {"i915_wa_registers", i915_wa_registers, 0}, 4548 {"i915_wa_registers", i915_wa_registers, 0},
4489 {"i915_ddb_info", i915_ddb_info, 0}, 4549 {"i915_ddb_info", i915_ddb_info, 0},
4550 {"i915_sseu_status", i915_sseu_status, 0},
4490}; 4551};
4491#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4552#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4492 4553
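
In the SKL branch of i915_sseu_status() above, each set bit in the EU
power-gate ack registers stands for an enabled EU pair, hence the factor of
two; a small worked instance:

	/* eu_mask[ss % 2] selects the four ack bits of one subslice; if
	 * three of them are set, this reports 2 * 3 = 6 enabled EUs. */
	eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] & eu_mask[ss % 2]);
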
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1a46787129e7..053e1788f578 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -36,6 +36,7 @@
36#include "intel_drv.h" 36#include "intel_drv.h"
37#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
38#include "i915_drv.h" 38#include "i915_drv.h"
39#include "i915_vgpu.h"
39#include "i915_trace.h" 40#include "i915_trace.h"
40#include <linux/pci.h> 41#include <linux/pci.h>
41#include <linux/console.h> 42#include <linux/console.h>
@@ -605,6 +606,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
605 } 606 }
606 } 607 }
607 608
609 /* Initialize slice/subslice/EU info */
608 if (IS_CHERRYVIEW(dev)) { 610 if (IS_CHERRYVIEW(dev)) {
609 u32 fuse, mask_eu; 611 u32 fuse, mask_eu;
610 612
@@ -614,7 +616,90 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
614 CHV_FGT_EU_DIS_SS1_R0_MASK | 616 CHV_FGT_EU_DIS_SS1_R0_MASK |
615 CHV_FGT_EU_DIS_SS1_R1_MASK); 617 CHV_FGT_EU_DIS_SS1_R1_MASK);
616 info->eu_total = 16 - hweight32(mask_eu); 618 info->eu_total = 16 - hweight32(mask_eu);
619 } else if (IS_SKYLAKE(dev)) {
620 const int s_max = 3, ss_max = 4, eu_max = 8;
621 int s, ss;
622 u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
623
624 fuse2 = I915_READ(GEN8_FUSE2);
625 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
626 GEN8_F2_S_ENA_SHIFT;
627 ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
628 GEN9_F2_SS_DIS_SHIFT;
629
630 eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
631 eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
632 eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
633
634 info->slice_total = hweight32(s_enable);
635 /*
636 * The subslice disable field is global, i.e. it applies
637 * to each of the enabled slices.
638 */
639 info->subslice_per_slice = ss_max - hweight32(ss_disable);
640 info->subslice_total = info->slice_total *
641 info->subslice_per_slice;
642
643 /*
644 * Iterate through enabled slices and subslices to
645 * count the total enabled EU.
646 */
647 for (s = 0; s < s_max; s++) {
648 if (!(s_enable & (0x1 << s)))
649 /* skip disabled slice */
650 continue;
651
652 for (ss = 0; ss < ss_max; ss++) {
653 u32 n_disabled;
654
655 if (ss_disable & (0x1 << ss))
656 /* skip disabled subslice */
657 continue;
658
659 n_disabled = hweight8(eu_disable[s] >>
660 (ss * eu_max));
661
662 /*
 663 * Record which subslice(s) has(have) 7 EUs. We
664 * can tune the hash used to spread work among
665 * subslices if they are unbalanced.
666 */
667 if (eu_max - n_disabled == 7)
668 info->subslice_7eu[s] |= 1 << ss;
669
670 info->eu_total += eu_max - n_disabled;
671 }
672 }
673
674 /*
675 * SKL is expected to always have a uniform distribution
676 * of EU across subslices with the exception that any one
677 * EU in any one subslice may be fused off for die
678 * recovery.
679 */
680 info->eu_per_subslice = info->subslice_total ?
681 DIV_ROUND_UP(info->eu_total,
682 info->subslice_total) : 0;
683 /*
684 * SKL supports slice power gating on devices with more than
685 * one slice, and supports EU power gating on devices with
686 * more than one EU pair per subslice.
687 */
688 info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
689 info->has_subslice_pg = 0;
690 info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
617 } 691 }
692 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
693 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
694 DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
695 DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
696 DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
697 DRM_DEBUG_DRIVER("has slice power gating: %s\n",
698 info->has_slice_pg ? "y" : "n");
699 DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
700 info->has_subslice_pg ? "y" : "n");
701 DRM_DEBUG_DRIVER("has EU power gating: %s\n",
702 info->has_eu_pg ? "y" : "n");
618} 703}
619 704
620/** 705/**
@@ -637,17 +722,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
637 722
638 info = (struct intel_device_info *) flags; 723 info = (struct intel_device_info *) flags;
639 724
640 /* Refuse to load on gen6+ without kms enabled. */
641 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
642 DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
643 DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
644 return -ENODEV;
645 }
646
647 /* UMS needs agp support. */
648 if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
649 return -EINVAL;
650
651 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 725 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
652 if (dev_priv == NULL) 726 if (dev_priv == NULL)
653 return -ENOMEM; 727 return -ENOMEM;
@@ -717,20 +791,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
717 if (ret) 791 if (ret)
718 goto out_regs; 792 goto out_regs;
719 793
720 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 794 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
721 /* WARNING: Apparently we must kick fbdev drivers before vgacon, 795 * otherwise the vga fbdev driver falls over. */
722 * otherwise the vga fbdev driver falls over. */ 796 ret = i915_kick_out_firmware_fb(dev_priv);
723 ret = i915_kick_out_firmware_fb(dev_priv); 797 if (ret) {
724 if (ret) { 798 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
725 DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); 799 goto out_gtt;
726 goto out_gtt; 800 }
727 }
728 801
729 ret = i915_kick_out_vgacon(dev_priv); 802 ret = i915_kick_out_vgacon(dev_priv);
730 if (ret) { 803 if (ret) {
731 DRM_ERROR("failed to remove conflicting VGA console\n"); 804 DRM_ERROR("failed to remove conflicting VGA console\n");
732 goto out_gtt; 805 goto out_gtt;
733 }
734 } 806 }
735 807
736 pci_set_master(dev->pdev); 808 pci_set_master(dev->pdev);
@@ -834,14 +906,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
834 906
835 intel_power_domains_init(dev_priv); 907 intel_power_domains_init(dev_priv);
836 908
837 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 909 ret = i915_load_modeset_init(dev);
838 ret = i915_load_modeset_init(dev); 910 if (ret < 0) {
839 if (ret < 0) { 911 DRM_ERROR("failed to init modeset\n");
840 DRM_ERROR("failed to init modeset\n"); 912 goto out_power_well;
841 goto out_power_well;
842 }
843 } 913 }
844 914
915 /*
916 * Notify a valid surface after modesetting,
917 * when running inside a VM.
918 */
919 if (intel_vgpu_active(dev))
920 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
921
845 i915_setup_sysfs(dev); 922 i915_setup_sysfs(dev);
846 923
847 if (INTEL_INFO(dev)->num_pipes) { 924 if (INTEL_INFO(dev)->num_pipes) {
@@ -921,28 +998,25 @@ int i915_driver_unload(struct drm_device *dev)
921 998
922 acpi_video_unregister(); 999 acpi_video_unregister();
923 1000
924 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1001 intel_fbdev_fini(dev);
925 intel_fbdev_fini(dev);
926 1002
927 drm_vblank_cleanup(dev); 1003 drm_vblank_cleanup(dev);
928 1004
929 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1005 intel_modeset_cleanup(dev);
930 intel_modeset_cleanup(dev);
931
932 /*
933 * free the memory space allocated for the child device
934 * config parsed from VBT
935 */
936 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
937 kfree(dev_priv->vbt.child_dev);
938 dev_priv->vbt.child_dev = NULL;
939 dev_priv->vbt.child_dev_num = 0;
940 }
941 1006
942 vga_switcheroo_unregister_client(dev->pdev); 1007 /*
943 vga_client_register(dev->pdev, NULL, NULL, NULL); 1008 * free the memory space allocated for the child device
1009 * config parsed from VBT
1010 */
1011 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1012 kfree(dev_priv->vbt.child_dev);
1013 dev_priv->vbt.child_dev = NULL;
1014 dev_priv->vbt.child_dev_num = 0;
944 } 1015 }
945 1016
1017 vga_switcheroo_unregister_client(dev->pdev);
1018 vga_client_register(dev->pdev, NULL, NULL, NULL);
1019
946 /* Free error state after interrupts are fully disabled. */ 1020 /* Free error state after interrupts are fully disabled. */
947 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 1021 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
948 i915_destroy_error_state(dev); 1022 i915_destroy_error_state(dev);
@@ -952,17 +1026,15 @@ int i915_driver_unload(struct drm_device *dev)
952 1026
953 intel_opregion_fini(dev); 1027 intel_opregion_fini(dev);
954 1028
955 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1029 /* Flush any outstanding unpin_work. */
956 /* Flush any outstanding unpin_work. */ 1030 flush_workqueue(dev_priv->wq);
957 flush_workqueue(dev_priv->wq);
958 1031
959 mutex_lock(&dev->struct_mutex); 1032 mutex_lock(&dev->struct_mutex);
960 i915_gem_cleanup_ringbuffer(dev); 1033 i915_gem_cleanup_ringbuffer(dev);
961 i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool); 1034 i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
962 i915_gem_context_fini(dev); 1035 i915_gem_context_fini(dev);
963 mutex_unlock(&dev->struct_mutex); 1036 mutex_unlock(&dev->struct_mutex);
964 i915_gem_cleanup_stolen(dev); 1037 i915_gem_cleanup_stolen(dev);
965 }
966 1038
967 intel_teardown_gmbus(dev); 1039 intel_teardown_gmbus(dev);
968 intel_teardown_mchbar(dev); 1040 intel_teardown_mchbar(dev);
@@ -1023,8 +1095,7 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1023 i915_gem_release(dev, file); 1095 i915_gem_release(dev, file);
1024 mutex_unlock(&dev->struct_mutex); 1096 mutex_unlock(&dev->struct_mutex);
1025 1097
1026 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1098 intel_modeset_preclose(dev, file);
1027 intel_modeset_preclose(dev, file);
1028} 1099}
1029 1100
1030void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1101void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
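
The SKL fuse decode above treats each eu_disable[s] register as packed 8-bit
per-subslice fields; a worked example of the per-subslice count (the register
value here is hypothetical):

	/* eu_max = 8: subslice ss owns bits [ss*8 .. ss*8+7] of eu_disable[s].
	 * With eu_disable[s] = 0x00000301:
	 *   ss = 0: hweight8(0x01) = 1 disabled -> 7 EUs enabled, so the
	 *           subslice is recorded in info->subslice_7eu[s];
	 *   ss = 1: hweight8(0x03) = 2 disabled -> 6 EUs enabled. */
	n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
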
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc6ea53d2b81..0001642c38b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -369,6 +369,19 @@ static const struct intel_device_info intel_skylake_info = {
369 IVB_CURSOR_OFFSETS, 369 IVB_CURSOR_OFFSETS,
370}; 370};
371 371
372static const struct intel_device_info intel_skylake_gt3_info = {
373 .is_preliminary = 1,
374 .is_skylake = 1,
375 .gen = 9, .num_pipes = 3,
376 .need_gfx_hws = 1, .has_hotplug = 1,
377 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
378 .has_llc = 1,
379 .has_ddi = 1,
380 .has_fbc = 1,
381 GEN_DEFAULT_PIPEOFFSETS,
382 IVB_CURSOR_OFFSETS,
383};
384
372/* 385/*
373 * Make sure any device matches here are from most specific to most 386 * Make sure any device matches here are from most specific to most
374 * general. For example, since the Quanta match is based on the subsystem 387 * general. For example, since the Quanta match is based on the subsystem
@@ -406,7 +419,9 @@ static const struct intel_device_info intel_skylake_info = {
406 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ 419 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
407 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ 420 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
408 INTEL_CHV_IDS(&intel_cherryview_info), \ 421 INTEL_CHV_IDS(&intel_cherryview_info), \
409 INTEL_SKL_IDS(&intel_skylake_info) 422 INTEL_SKL_GT1_IDS(&intel_skylake_info), \
423 INTEL_SKL_GT2_IDS(&intel_skylake_info), \
424 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info) \
410 425
411static const struct pci_device_id pciidlist[] = { /* aka */ 426static const struct pci_device_id pciidlist[] = { /* aka */
412 INTEL_PCI_IDS, 427 INTEL_PCI_IDS,
@@ -553,6 +568,7 @@ static int i915_drm_suspend(struct drm_device *dev)
553 struct drm_i915_private *dev_priv = dev->dev_private; 568 struct drm_i915_private *dev_priv = dev->dev_private;
554 struct drm_crtc *crtc; 569 struct drm_crtc *crtc;
555 pci_power_t opregion_target_state; 570 pci_power_t opregion_target_state;
571 int error;
556 572
557 /* ignore lid events during suspend */ 573 /* ignore lid events during suspend */
558 mutex_lock(&dev_priv->modeset_restore_lock); 574 mutex_lock(&dev_priv->modeset_restore_lock);
@@ -567,37 +583,32 @@ static int i915_drm_suspend(struct drm_device *dev)
567 583
568 pci_save_state(dev->pdev); 584 pci_save_state(dev->pdev);
569 585
570 /* If KMS is active, we do the leavevt stuff here */ 586 error = i915_gem_suspend(dev);
571 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 587 if (error) {
572 int error; 588 dev_err(&dev->pdev->dev,
573 589 "GEM idle failed, resume might fail\n");
574 error = i915_gem_suspend(dev); 590 return error;
575 if (error) { 591 }
576 dev_err(&dev->pdev->dev,
577 "GEM idle failed, resume might fail\n");
578 return error;
579 }
580 592
581 intel_suspend_gt_powersave(dev); 593 intel_suspend_gt_powersave(dev);
582 594
583 /* 595 /*
584 * Disable CRTCs directly since we want to preserve sw state 596 * Disable CRTCs directly since we want to preserve sw state
585 * for _thaw. Also, power gate the CRTC power wells. 597 * for _thaw. Also, power gate the CRTC power wells.
586 */ 598 */
587 drm_modeset_lock_all(dev); 599 drm_modeset_lock_all(dev);
588 for_each_crtc(dev, crtc) 600 for_each_crtc(dev, crtc)
589 intel_crtc_control(crtc, false); 601 intel_crtc_control(crtc, false);
590 drm_modeset_unlock_all(dev); 602 drm_modeset_unlock_all(dev);
591 603
592 intel_dp_mst_suspend(dev); 604 intel_dp_mst_suspend(dev);
593 605
594 intel_runtime_pm_disable_interrupts(dev_priv); 606 intel_runtime_pm_disable_interrupts(dev_priv);
595 intel_hpd_cancel_work(dev_priv); 607 intel_hpd_cancel_work(dev_priv);
596 608
597 intel_suspend_encoders(dev_priv); 609 intel_suspend_encoders(dev_priv);
598 610
599 intel_suspend_hw(dev); 611 intel_suspend_hw(dev);
600 }
601 612
602 i915_gem_suspend_gtt_mappings(dev); 613 i915_gem_suspend_gtt_mappings(dev);
603 614
@@ -679,53 +690,48 @@ static int i915_drm_resume(struct drm_device *dev)
679{ 690{
680 struct drm_i915_private *dev_priv = dev->dev_private; 691 struct drm_i915_private *dev_priv = dev->dev_private;
681 692
682 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 693 mutex_lock(&dev->struct_mutex);
683 mutex_lock(&dev->struct_mutex); 694 i915_gem_restore_gtt_mappings(dev);
684 i915_gem_restore_gtt_mappings(dev); 695 mutex_unlock(&dev->struct_mutex);
685 mutex_unlock(&dev->struct_mutex);
686 }
687 696
688 i915_restore_state(dev); 697 i915_restore_state(dev);
689 intel_opregion_setup(dev); 698 intel_opregion_setup(dev);
690 699
691 /* KMS EnterVT equivalent */ 700 intel_init_pch_refclk(dev);
692 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 701 drm_mode_config_reset(dev);
693 intel_init_pch_refclk(dev);
694 drm_mode_config_reset(dev);
695 702
696 mutex_lock(&dev->struct_mutex); 703 mutex_lock(&dev->struct_mutex);
697 if (i915_gem_init_hw(dev)) { 704 if (i915_gem_init_hw(dev)) {
698 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); 705 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
699 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 706 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
700 } 707 }
701 mutex_unlock(&dev->struct_mutex); 708 mutex_unlock(&dev->struct_mutex);
702 709
703 /* We need working interrupts for modeset enabling ... */ 710 /* We need working interrupts for modeset enabling ... */
704 intel_runtime_pm_enable_interrupts(dev_priv); 711 intel_runtime_pm_enable_interrupts(dev_priv);
705 712
706 intel_modeset_init_hw(dev); 713 intel_modeset_init_hw(dev);
707 714
708 spin_lock_irq(&dev_priv->irq_lock); 715 spin_lock_irq(&dev_priv->irq_lock);
709 if (dev_priv->display.hpd_irq_setup) 716 if (dev_priv->display.hpd_irq_setup)
710 dev_priv->display.hpd_irq_setup(dev); 717 dev_priv->display.hpd_irq_setup(dev);
711 spin_unlock_irq(&dev_priv->irq_lock); 718 spin_unlock_irq(&dev_priv->irq_lock);
712 719
713 drm_modeset_lock_all(dev); 720 drm_modeset_lock_all(dev);
714 intel_modeset_setup_hw_state(dev, true); 721 intel_modeset_setup_hw_state(dev, true);
715 drm_modeset_unlock_all(dev); 722 drm_modeset_unlock_all(dev);
716 723
717 intel_dp_mst_resume(dev); 724 intel_dp_mst_resume(dev);
718 725
719 /* 726 /*
720 * ... but also need to make sure that hotplug processing 727 * ... but also need to make sure that hotplug processing
721 * doesn't cause havoc. Like in the driver load code we don't 728 * doesn't cause havoc. Like in the driver load code we don't
 722 * bother with the tiny race here where we might lose hotplug 729 * bother with the tiny race here where we might lose hotplug
723 * notifications. 730 * notifications.
 724 */ 731 */
725 intel_hpd_init(dev_priv); 732 intel_hpd_init(dev_priv);
726 /* Config may have changed between suspend and resume */ 733 /* Config may have changed between suspend and resume */
727 drm_helper_hpd_irq_event(dev); 734 drm_helper_hpd_irq_event(dev);
728 }
729 735
730 intel_opregion_init(dev); 736 intel_opregion_init(dev);
731 737
@@ -861,38 +867,35 @@ int i915_reset(struct drm_device *dev)
861 * was running at the time of the reset (i.e. we weren't VT 867 * was running at the time of the reset (i.e. we weren't VT
862 * switched away). 868 * switched away).
863 */ 869 */
864 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
865 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
866 dev_priv->gpu_error.reload_in_reset = true;
867 870
868 ret = i915_gem_init_hw(dev); 871 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
872 dev_priv->gpu_error.reload_in_reset = true;
869 873
870 dev_priv->gpu_error.reload_in_reset = false; 874 ret = i915_gem_init_hw(dev);
871 875
872 mutex_unlock(&dev->struct_mutex); 876 dev_priv->gpu_error.reload_in_reset = false;
873 if (ret) {
874 DRM_ERROR("Failed hw init on reset %d\n", ret);
875 return ret;
876 }
877
878 /*
879 * FIXME: This races pretty badly against concurrent holders of
880 * ring interrupts. This is possible since we've started to drop
881 * dev->struct_mutex in select places when waiting for the gpu.
882 */
883 877
884 /* 878 mutex_unlock(&dev->struct_mutex);
885 * rps/rc6 re-init is necessary to restore state lost after the 879 if (ret) {
886 * reset and the re-install of gt irqs. Skip for ironlake per 880 DRM_ERROR("Failed hw init on reset %d\n", ret);
887 * previous concerns that it doesn't respond well to some forms 881 return ret;
888 * of re-init after reset.
889 */
890 if (INTEL_INFO(dev)->gen > 5)
891 intel_enable_gt_powersave(dev);
892 } else {
893 mutex_unlock(&dev->struct_mutex);
894 } 882 }
895 883
884 /*
885 * FIXME: This races pretty badly against concurrent holders of
886 * ring interrupts. This is possible since we've started to drop
887 * dev->struct_mutex in select places when waiting for the gpu.
888 */
889
890 /*
891 * rps/rc6 re-init is necessary to restore state lost after the
892 * reset and the re-install of gt irqs. Skip for ironlake per
893 * previous concerns that it doesn't respond well to some forms
894 * of re-init after reset.
895 */
896 if (INTEL_INFO(dev)->gen > 5)
897 intel_enable_gt_powersave(dev);
898
896 return 0; 899 return 0;
897} 900}
898 901
@@ -1650,11 +1653,9 @@ static int __init i915_init(void)
1650 1653
1651 if (!(driver.driver_features & DRIVER_MODESET)) { 1654 if (!(driver.driver_features & DRIVER_MODESET)) {
1652 driver.get_vblank_timestamp = NULL; 1655 driver.get_vblank_timestamp = NULL;
1653#ifndef CONFIG_DRM_I915_UMS
1654 /* Silently fail loading to not upset userspace. */ 1656 /* Silently fail loading to not upset userspace. */
1655 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n"); 1657 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1656 return 0; 1658 return 0;
1657#endif
1658 } 1659 }
1659 1660
1660 /* 1661 /*
@@ -1670,10 +1671,8 @@ static int __init i915_init(void)
1670 1671
1671static void __exit i915_exit(void) 1672static void __exit i915_exit(void)
1672{ 1673{
1673#ifndef CONFIG_DRM_I915_UMS
1674 if (!(driver.driver_features & DRIVER_MODESET)) 1674 if (!(driver.driver_features & DRIVER_MODESET))
1675 return; /* Never loaded a driver. */ 1675 return; /* Never loaded a driver. */
1676#endif
1677 1676
1678 drm_pci_exit(&driver, &i915_pci_driver); 1677 drm_pci_exit(&driver, &i915_pci_driver);
1679} 1678}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8727086cf48c..ee5bc43dfc0b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
31#define _I915_DRV_H_ 31#define _I915_DRV_H_
32 32
33#include <uapi/drm/i915_drm.h> 33#include <uapi/drm/i915_drm.h>
34#include <uapi/drm/drm_fourcc.h>
34 35
35#include "i915_reg.h" 36#include "i915_reg.h"
36#include "intel_bios.h" 37#include "intel_bios.h"
@@ -55,7 +56,7 @@
55 56
56#define DRIVER_NAME "i915" 57#define DRIVER_NAME "i915"
57#define DRIVER_DESC "Intel Graphics" 58#define DRIVER_DESC "Intel Graphics"
58#define DRIVER_DATE "20150130" 59#define DRIVER_DATE "20150227"
59 60
60#undef WARN_ON 61#undef WARN_ON
 61/* Many gcc seem to not see through this and fall over :( */ 62/* Many gcc seem to not see through this and fall over :( */
@@ -692,7 +693,18 @@ struct intel_device_info {
692 int trans_offsets[I915_MAX_TRANSCODERS]; 693 int trans_offsets[I915_MAX_TRANSCODERS];
693 int palette_offsets[I915_MAX_PIPES]; 694 int palette_offsets[I915_MAX_PIPES];
694 int cursor_offsets[I915_MAX_PIPES]; 695 int cursor_offsets[I915_MAX_PIPES];
695 unsigned int eu_total; 696
697 /* Slice/subslice/EU info */
698 u8 slice_total;
699 u8 subslice_total;
700 u8 subslice_per_slice;
701 u8 eu_total;
702 u8 eu_per_subslice;
703 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
704 u8 subslice_7eu[3];
705 u8 has_slice_pg:1;
706 u8 has_subslice_pg:1;
707 u8 has_eu_pg:1;
696}; 708};
697 709
698#undef DEFINE_FLAG 710#undef DEFINE_FLAG
@@ -772,10 +784,10 @@ struct intel_context {
772}; 784};
773 785
774struct i915_fbc { 786struct i915_fbc {
775 unsigned long size; 787 unsigned long uncompressed_size;
776 unsigned threshold; 788 unsigned threshold;
777 unsigned int fb_id; 789 unsigned int fb_id;
778 enum plane plane; 790 struct intel_crtc *crtc;
779 int y; 791 int y;
780 792
781 struct drm_mm_node compressed_fb; 793 struct drm_mm_node compressed_fb;
@@ -888,150 +900,21 @@ struct intel_gmbus {
888}; 900};
889 901
890struct i915_suspend_saved_registers { 902struct i915_suspend_saved_registers {
891 u8 saveLBB;
892 u32 saveDSPACNTR;
893 u32 saveDSPBCNTR;
894 u32 saveDSPARB; 903 u32 saveDSPARB;
895 u32 savePIPEACONF;
896 u32 savePIPEBCONF;
897 u32 savePIPEASRC;
898 u32 savePIPEBSRC;
899 u32 saveFPA0;
900 u32 saveFPA1;
901 u32 saveDPLL_A;
902 u32 saveDPLL_A_MD;
903 u32 saveHTOTAL_A;
904 u32 saveHBLANK_A;
905 u32 saveHSYNC_A;
906 u32 saveVTOTAL_A;
907 u32 saveVBLANK_A;
908 u32 saveVSYNC_A;
909 u32 saveBCLRPAT_A;
910 u32 saveTRANSACONF;
911 u32 saveTRANS_HTOTAL_A;
912 u32 saveTRANS_HBLANK_A;
913 u32 saveTRANS_HSYNC_A;
914 u32 saveTRANS_VTOTAL_A;
915 u32 saveTRANS_VBLANK_A;
916 u32 saveTRANS_VSYNC_A;
917 u32 savePIPEASTAT;
918 u32 saveDSPASTRIDE;
919 u32 saveDSPASIZE;
920 u32 saveDSPAPOS;
921 u32 saveDSPAADDR;
922 u32 saveDSPASURF;
923 u32 saveDSPATILEOFF;
924 u32 savePFIT_PGM_RATIOS;
925 u32 saveBLC_HIST_CTL;
926 u32 saveBLC_PWM_CTL;
927 u32 saveBLC_PWM_CTL2;
928 u32 saveBLC_CPU_PWM_CTL;
929 u32 saveBLC_CPU_PWM_CTL2;
930 u32 saveFPB0;
931 u32 saveFPB1;
932 u32 saveDPLL_B;
933 u32 saveDPLL_B_MD;
934 u32 saveHTOTAL_B;
935 u32 saveHBLANK_B;
936 u32 saveHSYNC_B;
937 u32 saveVTOTAL_B;
938 u32 saveVBLANK_B;
939 u32 saveVSYNC_B;
940 u32 saveBCLRPAT_B;
941 u32 saveTRANSBCONF;
942 u32 saveTRANS_HTOTAL_B;
943 u32 saveTRANS_HBLANK_B;
944 u32 saveTRANS_HSYNC_B;
945 u32 saveTRANS_VTOTAL_B;
946 u32 saveTRANS_VBLANK_B;
947 u32 saveTRANS_VSYNC_B;
948 u32 savePIPEBSTAT;
949 u32 saveDSPBSTRIDE;
950 u32 saveDSPBSIZE;
951 u32 saveDSPBPOS;
952 u32 saveDSPBADDR;
953 u32 saveDSPBSURF;
954 u32 saveDSPBTILEOFF;
955 u32 saveVGA0;
956 u32 saveVGA1;
957 u32 saveVGA_PD;
958 u32 saveVGACNTRL;
959 u32 saveADPA;
960 u32 saveLVDS; 904 u32 saveLVDS;
961 u32 savePP_ON_DELAYS; 905 u32 savePP_ON_DELAYS;
962 u32 savePP_OFF_DELAYS; 906 u32 savePP_OFF_DELAYS;
963 u32 saveDVOA;
964 u32 saveDVOB;
965 u32 saveDVOC;
966 u32 savePP_ON; 907 u32 savePP_ON;
967 u32 savePP_OFF; 908 u32 savePP_OFF;
968 u32 savePP_CONTROL; 909 u32 savePP_CONTROL;
969 u32 savePP_DIVISOR; 910 u32 savePP_DIVISOR;
970 u32 savePFIT_CONTROL;
971 u32 save_palette_a[256];
972 u32 save_palette_b[256];
973 u32 saveFBC_CONTROL; 911 u32 saveFBC_CONTROL;
974 u32 saveIER;
975 u32 saveIIR;
976 u32 saveIMR;
977 u32 saveDEIER;
978 u32 saveDEIMR;
979 u32 saveGTIER;
980 u32 saveGTIMR;
981 u32 saveFDI_RXA_IMR;
982 u32 saveFDI_RXB_IMR;
983 u32 saveCACHE_MODE_0; 912 u32 saveCACHE_MODE_0;
984 u32 saveMI_ARB_STATE; 913 u32 saveMI_ARB_STATE;
985 u32 saveSWF0[16]; 914 u32 saveSWF0[16];
986 u32 saveSWF1[16]; 915 u32 saveSWF1[16];
987 u32 saveSWF2[3]; 916 u32 saveSWF2[3];
988 u8 saveMSR;
989 u8 saveSR[8];
990 u8 saveGR[25];
991 u8 saveAR_INDEX;
992 u8 saveAR[21];
993 u8 saveDACMASK;
994 u8 saveCR[37];
995 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 917 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
996 u32 saveCURACNTR;
997 u32 saveCURAPOS;
998 u32 saveCURABASE;
999 u32 saveCURBCNTR;
1000 u32 saveCURBPOS;
1001 u32 saveCURBBASE;
1002 u32 saveCURSIZE;
1003 u32 saveDP_B;
1004 u32 saveDP_C;
1005 u32 saveDP_D;
1006 u32 savePIPEA_GMCH_DATA_M;
1007 u32 savePIPEB_GMCH_DATA_M;
1008 u32 savePIPEA_GMCH_DATA_N;
1009 u32 savePIPEB_GMCH_DATA_N;
1010 u32 savePIPEA_DP_LINK_M;
1011 u32 savePIPEB_DP_LINK_M;
1012 u32 savePIPEA_DP_LINK_N;
1013 u32 savePIPEB_DP_LINK_N;
1014 u32 saveFDI_RXA_CTL;
1015 u32 saveFDI_TXA_CTL;
1016 u32 saveFDI_RXB_CTL;
1017 u32 saveFDI_TXB_CTL;
1018 u32 savePFA_CTL_1;
1019 u32 savePFB_CTL_1;
1020 u32 savePFA_WIN_SZ;
1021 u32 savePFB_WIN_SZ;
1022 u32 savePFA_WIN_POS;
1023 u32 savePFB_WIN_POS;
1024 u32 savePCH_DREF_CONTROL;
1025 u32 saveDISP_ARB_CTL;
1026 u32 savePIPEA_DATA_M1;
1027 u32 savePIPEA_DATA_N1;
1028 u32 savePIPEA_LINK_M1;
1029 u32 savePIPEA_LINK_N1;
1030 u32 savePIPEB_DATA_M1;
1031 u32 savePIPEB_DATA_N1;
1032 u32 savePIPEB_LINK_M1;
1033 u32 savePIPEB_LINK_N1;
1034 u32 saveMCHBAR_RENDER_STANDBY;
1035 u32 savePCH_PORT_HOTPLUG; 918 u32 savePCH_PORT_HOTPLUG;
1036 u16 saveGCDGMBUS; 919 u16 saveGCDGMBUS;
1037}; 920};
@@ -1454,6 +1337,7 @@ struct intel_vbt_data {
1454 bool edp_initialized; 1337 bool edp_initialized;
1455 bool edp_support; 1338 bool edp_support;
1456 int edp_bpp; 1339 int edp_bpp;
1340 bool edp_low_vswing;
1457 struct edp_power_seq edp_pps; 1341 struct edp_power_seq edp_pps;
1458 1342
1459 struct { 1343 struct {
@@ -1640,6 +1524,10 @@ struct i915_workarounds {
1640 u32 count; 1524 u32 count;
1641}; 1525};
1642 1526
1527struct i915_virtual_gpu {
1528 bool active;
1529};
1530
1643struct drm_i915_private { 1531struct drm_i915_private {
1644 struct drm_device *dev; 1532 struct drm_device *dev;
1645 struct kmem_cache *slab; 1533 struct kmem_cache *slab;
@@ -1652,6 +1540,8 @@ struct drm_i915_private {
1652 1540
1653 struct intel_uncore uncore; 1541 struct intel_uncore uncore;
1654 1542
1543 struct i915_virtual_gpu vgpu;
1544
1655 struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; 1545 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
1656 1546
1657 1547
@@ -2141,7 +2031,7 @@ struct drm_i915_gem_request {
2141 u32 tail; 2031 u32 tail;
2142 2032
2143 /** 2033 /**
2144 * Context related to this request 2034 * Context and ring buffer related to this request
2145 * Contexts are refcounted, so when this request is associated with a 2035 * Contexts are refcounted, so when this request is associated with a
2146 * context, we must increment the context's refcount, to guarantee that 2036 * context, we must increment the context's refcount, to guarantee that
2147 * it persists while any request is linked to it. Requests themselves 2037 * it persists while any request is linked to it. Requests themselves
@@ -2151,6 +2041,7 @@ struct drm_i915_gem_request {
2151 * context. 2041 * context.
2152 */ 2042 */
2153 struct intel_context *ctx; 2043 struct intel_context *ctx;
2044 struct intel_ringbuffer *ringbuf;
2154 2045
2155 /** Batch buffer related to this request if any */ 2046 /** Batch buffer related to this request if any */
2156 struct drm_i915_gem_object *batch_obj; 2047 struct drm_i915_gem_object *batch_obj;
@@ -2165,6 +2056,9 @@ struct drm_i915_gem_request {
2165 /** file_priv list entry for this request */ 2056 /** file_priv list entry for this request */
2166 struct list_head client_list; 2057 struct list_head client_list;
2167 2058
2059 /** process identifier submitting this request */
2060 struct pid *pid;
2061
2168 uint32_t uniq; 2062 uint32_t uniq;
2169 2063
2170 /** 2064 /**
@@ -2351,6 +2245,7 @@ struct drm_i915_cmd_table {
2351}) 2245})
2352#define INTEL_INFO(p) (&__I915__(p)->info) 2246#define INTEL_INFO(p) (&__I915__(p)->info)
2353#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2247#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2248#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2354 2249
2355#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) 2250#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
2356#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) 2251#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
@@ -2373,9 +2268,6 @@ struct drm_i915_cmd_table {
2373#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ 2268#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
2374 INTEL_DEVID(dev) == 0x0152 || \ 2269 INTEL_DEVID(dev) == 0x0152 || \
2375 INTEL_DEVID(dev) == 0x015a) 2270 INTEL_DEVID(dev) == 0x015a)
2376#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
2377 INTEL_DEVID(dev) == 0x0106 || \
2378 INTEL_DEVID(dev) == 0x010A)
2379#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2271#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
2380#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2272#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2381#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2273#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
@@ -2399,6 +2291,12 @@ struct drm_i915_cmd_table {
2399 INTEL_DEVID(dev) == 0x0A1E) 2291 INTEL_DEVID(dev) == 0x0A1E)
2400#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2292#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
2401 2293
2294#define SKL_REVID_A0 (0x0)
2295#define SKL_REVID_B0 (0x1)
2296#define SKL_REVID_C0 (0x2)
2297#define SKL_REVID_D0 (0x3)
2298#define SKL_REVID_E0 (0x4)
2299
2402/* 2300/*
2403 * The genX designation typically refers to the render engine, so render 2301 * The genX designation typically refers to the render engine, so render
2404 * capability related checks should use IS_GEN, while display and other checks 2302 * capability related checks should use IS_GEN, while display and other checks
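
[annotation] INTEL_REVID() reads the PCI revision, and together with the SKL_REVID_* values above it lets stepping-specific workarounds be gated precisely. A minimal sketch of the intended call pattern, assuming the existing IS_SKYLAKE() platform check; the workaround body is a placeholder, only the macros come from this patch:

        if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
                /* apply a fix needed only on A0/B0 steppings */
        }
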
@@ -2506,8 +2404,6 @@ extern int i915_max_ioctl;
2506 2404
2507extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state); 2405extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
2508extern int i915_resume_legacy(struct drm_device *dev); 2406extern int i915_resume_legacy(struct drm_device *dev);
2509extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
2510extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
2511 2407
2512/* i915_params.c */ 2408/* i915_params.c */
2513struct i915_params { 2409struct i915_params {
@@ -2590,6 +2486,10 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
2590void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2486void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
2591 enum forcewake_domains domains); 2487 enum forcewake_domains domains);
2592void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2488void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
2489static inline bool intel_vgpu_active(struct drm_device *dev)
2490{
2491 return to_i915(dev)->vgpu.active;
2492}
2593 2493
2594void 2494void
2595i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2495i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
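
[annotation] intel_vgpu_active() is the cheap predicate the rest of this series keys off: later hunks use it to drop full PPGTT, read the fence count from the PV info page, and balloon the GGTT. The usage pattern, sketched:

        if (intel_vgpu_active(dev)) {
                /* running on a hypervisor-emulated GPU: take the
                 * paravirtualized path */
        } else {
                /* bare metal: program the hardware directly */
        }

The vgpu.active flag itself is presumably set during driver init by the detection code in the new i915_vgpu.c.
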
@@ -3120,10 +3020,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
3120extern int i915_save_state(struct drm_device *dev); 3020extern int i915_save_state(struct drm_device *dev);
3121extern int i915_restore_state(struct drm_device *dev); 3021extern int i915_restore_state(struct drm_device *dev);
3122 3022
3123/* i915_ums.c */
3124void i915_save_display_reg(struct drm_device *dev);
3125void i915_restore_display_reg(struct drm_device *dev);
3126
3127/* i915_sysfs.c */ 3023/* i915_sysfs.c */
3128void i915_setup_sysfs(struct drm_device *dev_priv); 3024void i915_setup_sysfs(struct drm_device *dev_priv);
3129void i915_teardown_sysfs(struct drm_device *dev_priv); 3025void i915_teardown_sysfs(struct drm_device *dev_priv);
@@ -3195,8 +3091,7 @@ extern void i915_redisable_vga(struct drm_device *dev);
3195extern void i915_redisable_vga_power_on(struct drm_device *dev); 3091extern void i915_redisable_vga_power_on(struct drm_device *dev);
3196extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 3092extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
3197extern void intel_init_pch_refclk(struct drm_device *dev); 3093extern void intel_init_pch_refclk(struct drm_device *dev);
3198extern void gen6_set_rps(struct drm_device *dev, u8 val); 3094extern void intel_set_rps(struct drm_device *dev, u8 val);
3199extern void valleyview_set_rps(struct drm_device *dev, u8 val);
3200extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3095extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3201 bool enable); 3096 bool enable);
3202extern void intel_detect_pch(struct drm_device *dev); 3097extern void intel_detect_pch(struct drm_device *dev);
@@ -3209,8 +3104,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3209int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, 3104int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
3210 struct drm_file *file); 3105 struct drm_file *file);
3211 3106
3212void intel_notify_mmio_flip(struct intel_engine_cs *ring);
3213
3214/* overlay */ 3107/* overlay */
3215extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 3108extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
3216extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3109extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5daad5f75fb..0107c2ae77d0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,7 @@
29#include <drm/drm_vma_manager.h> 29#include <drm/drm_vma_manager.h>
30#include <drm/i915_drm.h> 30#include <drm/i915_drm.h>
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "i915_vgpu.h"
32#include "i915_trace.h" 33#include "i915_trace.h"
33#include "intel_drv.h" 34#include "intel_drv.h"
34#include <linux/oom.h> 35#include <linux/oom.h>
@@ -2492,6 +2493,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
2492 list_add_tail(&request->client_list, 2493 list_add_tail(&request->client_list,
2493 &file_priv->mm.request_list); 2494 &file_priv->mm.request_list);
2494 spin_unlock(&file_priv->mm.lock); 2495 spin_unlock(&file_priv->mm.lock);
2496
2497 request->pid = get_pid(task_pid(current));
2495 } 2498 }
2496 2499
2497 trace_i915_gem_request_add(request); 2500 trace_i915_gem_request_add(request);
@@ -2572,6 +2575,8 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
2572 list_del(&request->list); 2575 list_del(&request->list);
2573 i915_gem_request_remove_from_client(request); 2576 i915_gem_request_remove_from_client(request);
2574 2577
2578 put_pid(request->pid);
2579
2575 i915_gem_request_unreference(request); 2580 i915_gem_request_unreference(request);
2576} 2581}
2577 2582
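
[annotation] Recording the submitter via a refcounted struct pid, rather than going through request->file_priv, keeps the identity resolvable even after the drm file is closed. The lifetime rules, collected from the two hunks above and the error-capture hunk further down:

        /* on submission: take a reference on the current task's pid */
        request->pid = get_pid(task_pid(current));

        /* on request destruction: drop it (put_pid(NULL) is a no-op,
         * which covers kernel-internal requests with no file_priv) */
        put_pid(request->pid);

        /* on error capture: resolve pid -> task under RCU */
        rcu_read_lock();
        task = pid_task(request->pid, PIDTYPE_PID);
        if (task)
                strcpy(error->ring[i].comm, task->comm);
        rcu_read_unlock();
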
@@ -2757,7 +2762,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2757 2762
2758 while (!list_empty(&ring->request_list)) { 2763 while (!list_empty(&ring->request_list)) {
2759 struct drm_i915_gem_request *request; 2764 struct drm_i915_gem_request *request;
2760 struct intel_ringbuffer *ringbuf;
2761 2765
2762 request = list_first_entry(&ring->request_list, 2766 request = list_first_entry(&ring->request_list,
2763 struct drm_i915_gem_request, 2767 struct drm_i915_gem_request,
@@ -2768,23 +2772,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2768 2772
2769 trace_i915_gem_request_retire(request); 2773 trace_i915_gem_request_retire(request);
2770 2774
2771 /* This is one of the few common intersection points
2772 * between legacy ringbuffer submission and execlists:
2773 * we need to tell them apart in order to find the correct
2774 * ringbuffer to which the request belongs to.
2775 */
2776 if (i915.enable_execlists) {
2777 struct intel_context *ctx = request->ctx;
2778 ringbuf = ctx->engine[ring->id].ringbuf;
2779 } else
2780 ringbuf = ring->buffer;
2781
2782 /* We know the GPU must have read the request to have 2775 /* We know the GPU must have read the request to have
2783 * sent us the seqno + interrupt, so use the position 2776 * sent us the seqno + interrupt, so use the position
2784 * of tail of the request to update the last known position 2777 * of tail of the request to update the last known position
2785 * of the GPU head. 2778 * of the GPU head.
2786 */ 2779 */
2787 ringbuf->last_retired_head = request->postfix; 2780 request->ringbuf->last_retired_head = request->postfix;
2788 2781
2789 i915_gem_free_request(request); 2782 i915_gem_free_request(request);
2790 } 2783 }
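
[annotation] Storing the ring buffer on the request when it is created removes the execlists-vs-legacy branch from the retire path. The creation side is not part of this excerpt, but it presumably mirrors the lookup deleted above:

        /* sketch of the submission side, derived from the removed code */
        if (i915.enable_execlists)
                request->ringbuf = request->ctx->engine[ring->id].ringbuf;
        else
                request->ringbuf = ring->buffer;

After that, request->ringbuf->last_retired_head can be updated without knowing which submission mode produced the request.
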
@@ -4232,7 +4225,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
4232 fenceable = (vma->node.size == fence_size && 4225 fenceable = (vma->node.size == fence_size &&
4233 (vma->node.start & (fence_alignment - 1)) == 0); 4226 (vma->node.start & (fence_alignment - 1)) == 0);
4234 4227
4235 mappable = (vma->node.start + obj->base.size <= 4228 mappable = (vma->node.start + fence_size <=
4236 dev_priv->gtt.mappable_end); 4229 dev_priv->gtt.mappable_end);
4237 4230
4238 obj->map_and_fenceable = mappable && fenceable; 4231 obj->map_and_fenceable = mappable && fenceable;
@@ -4607,10 +4600,6 @@ i915_gem_suspend(struct drm_device *dev)
4607 4600
4608 i915_gem_retire_requests(dev); 4601 i915_gem_retire_requests(dev);
4609 4602
4610 /* Under UMS, be paranoid and evict. */
4611 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4612 i915_gem_evict_everything(dev);
4613
4614 i915_gem_stop_ringbuffers(dev); 4603 i915_gem_stop_ringbuffers(dev);
4615 mutex_unlock(&dev->struct_mutex); 4604 mutex_unlock(&dev->struct_mutex);
4616 4605
@@ -4967,18 +4956,8 @@ i915_gem_load(struct drm_device *dev)
4967 i915_gem_idle_work_handler); 4956 i915_gem_idle_work_handler);
4968 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4957 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4969 4958
4970 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4971 if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
4972 I915_WRITE(MI_ARB_STATE,
4973 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4974 }
4975
4976 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 4959 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4977 4960
4978 /* Old X drivers will take 0-2 for front, back, depth buffers */
4979 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4980 dev_priv->fence_reg_start = 3;
4981
4982 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) 4961 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4983 dev_priv->num_fence_regs = 32; 4962 dev_priv->num_fence_regs = 32;
4984 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4963 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
@@ -4986,6 +4965,10 @@ i915_gem_load(struct drm_device *dev)
4986 else 4965 else
4987 dev_priv->num_fence_regs = 8; 4966 dev_priv->num_fence_regs = 8;
4988 4967
4968 if (intel_vgpu_active(dev))
4969 dev_priv->num_fence_regs =
4970 I915_READ(vgtif_reg(avail_rs.fence_num));
4971
4989 /* Initialize fence registers to zero */ 4972 /* Initialize fence registers to zero */
4990 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4973 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4991 i915_gem_restore_fences(dev); 4974 i915_gem_restore_fences(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8603bf48d3ee..70346b0028f9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev)
296 struct drm_i915_private *dev_priv = dev->dev_private; 296 struct drm_i915_private *dev_priv = dev->dev_private;
297 int i; 297 int i;
298 298
299 /* In execlists mode we will unreference the context when the execlist 299 if (i915.enable_execlists) {
300 * queue is cleared and the requests destroyed. 300 struct intel_context *ctx;
301 */ 301
302 if (i915.enable_execlists) 302 list_for_each_entry(ctx, &dev_priv->context_list, link) {
303 intel_lr_context_reset(dev, ctx);
304 }
305
303 return; 306 return;
307 }
304 308
305 for (i = 0; i < I915_NUM_RINGS; i++) { 309 for (i = 0; i < I915_NUM_RINGS; i++) {
306 struct intel_engine_cs *ring = &dev_priv->ring[i]; 310 struct intel_engine_cs *ring = &dev_priv->ring[i];
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b773368fc62c..85a6adaba258 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1076,16 +1076,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
1076 struct drm_i915_gem_object *batch_obj, 1076 struct drm_i915_gem_object *batch_obj,
1077 u32 batch_start_offset, 1077 u32 batch_start_offset,
1078 u32 batch_len, 1078 u32 batch_len,
1079 bool is_master, 1079 bool is_master)
1080 u32 *flags)
1081{ 1080{
1082 struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev); 1081 struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
1083 struct drm_i915_gem_object *shadow_batch_obj; 1082 struct drm_i915_gem_object *shadow_batch_obj;
1084 bool need_reloc = false; 1083 struct i915_vma *vma;
1085 int ret; 1084 int ret;
1086 1085
1087 shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, 1086 shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
1088 batch_obj->base.size); 1087 PAGE_ALIGN(batch_len));
1089 if (IS_ERR(shadow_batch_obj)) 1088 if (IS_ERR(shadow_batch_obj))
1090 return shadow_batch_obj; 1089 return shadow_batch_obj;
1091 1090
@@ -1095,40 +1094,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
1095 batch_start_offset, 1094 batch_start_offset,
1096 batch_len, 1095 batch_len,
1097 is_master); 1096 is_master);
1098 if (ret) { 1097 if (ret)
1099 if (ret == -EACCES) 1098 goto err;
1100 return batch_obj;
1101 } else {
1102 struct i915_vma *vma;
1103 1099
1104 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); 1100 ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
1101 if (ret)
1102 goto err;
1105 1103
1106 vma = i915_gem_obj_to_ggtt(shadow_batch_obj); 1104 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1107 vma->exec_entry = shadow_exec_entry;
1108 vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
1109 drm_gem_object_reference(&shadow_batch_obj->base);
1110 i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
1111 list_add_tail(&vma->exec_list, &eb->vmas);
1112 1105
1113 shadow_batch_obj->base.pending_read_domains = 1106 vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1114 batch_obj->base.pending_read_domains; 1107 vma->exec_entry = shadow_exec_entry;
1108 vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
1109 drm_gem_object_reference(&shadow_batch_obj->base);
1110 list_add_tail(&vma->exec_list, &eb->vmas);
1115 1111
1116 /* 1112 shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1117 * Set the DISPATCH_SECURE bit to remove the NON_SECURE 1113
1118 * bit from MI_BATCH_BUFFER_START commands issued in the 1114 return shadow_batch_obj;
1119 * dispatch_execbuffer implementations. We specifically
1120 * don't want that set when the command parser is
1121 * enabled.
1122 *
1123 * FIXME: with aliasing ppgtt, buffers that should only
1124 * be in ggtt still end up in the aliasing ppgtt. remove
1125 * this check when that is fixed.
1126 */
1127 if (USES_FULL_PPGTT(dev))
1128 *flags |= I915_DISPATCH_SECURE;
1129 }
1130 1115
1131 return ret ? ERR_PTR(ret) : shadow_batch_obj; 1116err:
1117 if (ret == -EACCES) /* unhandled chained batch */
1118 return batch_obj;
1119 else
1120 return ERR_PTR(ret);
1132} 1121}
1133 1122
1134int 1123int
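
[annotation] After this rework, i915_gem_execbuffer_parse() encodes three outcomes in one pointer: the pinned shadow object on success, the original batch_obj when the parser sees a chained batch it cannot handle (-EACCES), or ERR_PTR() for genuine failures. The caller, shown in a later hunk, follows the usual kernel idiom:

        batch_obj = i915_gem_execbuffer_parse(ring, &shadow_exec_entry, eb,
                                              batch_obj,
                                              args->batch_start_offset,
                                              args->batch_len,
                                              file->is_master);
        if (IS_ERR(batch_obj))
                return PTR_ERR(batch_obj);      /* hard failure */
        /* otherwise batch_obj is the shadow copy or the original batch */
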
@@ -1138,7 +1127,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1138 struct drm_i915_gem_execbuffer2 *args, 1127 struct drm_i915_gem_execbuffer2 *args,
1139 struct list_head *vmas, 1128 struct list_head *vmas,
1140 struct drm_i915_gem_object *batch_obj, 1129 struct drm_i915_gem_object *batch_obj,
1141 u64 exec_start, u32 flags) 1130 u64 exec_start, u32 dispatch_flags)
1142{ 1131{
1143 struct drm_clip_rect *cliprects = NULL; 1132 struct drm_clip_rect *cliprects = NULL;
1144 struct drm_i915_private *dev_priv = dev->dev_private; 1133 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1266,19 +1255,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1266 1255
1267 ret = ring->dispatch_execbuffer(ring, 1256 ret = ring->dispatch_execbuffer(ring,
1268 exec_start, exec_len, 1257 exec_start, exec_len,
1269 flags); 1258 dispatch_flags);
1270 if (ret) 1259 if (ret)
1271 goto error; 1260 goto error;
1272 } 1261 }
1273 } else { 1262 } else {
1274 ret = ring->dispatch_execbuffer(ring, 1263 ret = ring->dispatch_execbuffer(ring,
1275 exec_start, exec_len, 1264 exec_start, exec_len,
1276 flags); 1265 dispatch_flags);
1277 if (ret) 1266 if (ret)
1278 return ret; 1267 return ret;
1279 } 1268 }
1280 1269
1281 trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags); 1270 trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
1282 1271
1283 i915_gem_execbuffer_move_to_active(vmas, ring); 1272 i915_gem_execbuffer_move_to_active(vmas, ring);
1284 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1273 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1353,7 +1342,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1353 struct i915_address_space *vm; 1342 struct i915_address_space *vm;
1354 const u32 ctx_id = i915_execbuffer2_get_context_id(*args); 1343 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1355 u64 exec_start = args->batch_start_offset; 1344 u64 exec_start = args->batch_start_offset;
1356 u32 flags; 1345 u32 dispatch_flags;
1357 int ret; 1346 int ret;
1358 bool need_relocs; 1347 bool need_relocs;
1359 1348
@@ -1364,15 +1353,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1364 if (ret) 1353 if (ret)
1365 return ret; 1354 return ret;
1366 1355
1367 flags = 0; 1356 dispatch_flags = 0;
1368 if (args->flags & I915_EXEC_SECURE) { 1357 if (args->flags & I915_EXEC_SECURE) {
1369 if (!file->is_master || !capable(CAP_SYS_ADMIN)) 1358 if (!file->is_master || !capable(CAP_SYS_ADMIN))
1370 return -EPERM; 1359 return -EPERM;
1371 1360
1372 flags |= I915_DISPATCH_SECURE; 1361 dispatch_flags |= I915_DISPATCH_SECURE;
1373 } 1362 }
1374 if (args->flags & I915_EXEC_IS_PINNED) 1363 if (args->flags & I915_EXEC_IS_PINNED)
1375 flags |= I915_DISPATCH_PINNED; 1364 dispatch_flags |= I915_DISPATCH_PINNED;
1376 1365
1377 if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) { 1366 if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1378 DRM_DEBUG("execbuf with unknown ring: %d\n", 1367 DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1494,12 +1483,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1494 batch_obj, 1483 batch_obj,
1495 args->batch_start_offset, 1484 args->batch_start_offset,
1496 args->batch_len, 1485 args->batch_len,
1497 file->is_master, 1486 file->is_master);
1498 &flags);
1499 if (IS_ERR(batch_obj)) { 1487 if (IS_ERR(batch_obj)) {
1500 ret = PTR_ERR(batch_obj); 1488 ret = PTR_ERR(batch_obj);
1501 goto err; 1489 goto err;
1502 } 1490 }
1491
1492 /*
1493 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1494 * bit from MI_BATCH_BUFFER_START commands issued in the
1495 * dispatch_execbuffer implementations. We specifically
1496 * don't want that set when the command parser is
1497 * enabled.
1498 *
1499 * FIXME: with aliasing ppgtt, buffers that should only
1500 * be in ggtt still end up in the aliasing ppgtt. remove
1501 * this check when that is fixed.
1502 */
1503 if (USES_FULL_PPGTT(dev))
1504 dispatch_flags |= I915_DISPATCH_SECURE;
1505
1506 exec_start = 0;
1503 } 1507 }
1504 1508
1505 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; 1509 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1507,7 +1511,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1507 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 1511 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1508 * batch" bit. Hence we need to pin secure batches into the global gtt. 1512 * batch" bit. Hence we need to pin secure batches into the global gtt.
1509 * hsw should have this fixed, but bdw mucks it up again. */ 1513 * hsw should have this fixed, but bdw mucks it up again. */
1510 if (flags & I915_DISPATCH_SECURE) { 1514 if (dispatch_flags & I915_DISPATCH_SECURE) {
1511 /* 1515 /*
1512 * So on first glance it looks freaky that we pin the batch here 1516 * So on first glance it looks freaky that we pin the batch here
1513 * outside of the reservation loop. But: 1517 * outside of the reservation loop. But:
@@ -1527,7 +1531,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1527 exec_start += i915_gem_obj_offset(batch_obj, vm); 1531 exec_start += i915_gem_obj_offset(batch_obj, vm);
1528 1532
1529 ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args, 1533 ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
1530 &eb->vmas, batch_obj, exec_start, flags); 1534 &eb->vmas, batch_obj, exec_start,
1535 dispatch_flags);
1531 1536
1532 /* 1537 /*
1533 * FIXME: We crucially rely upon the active tracking for the (ppgtt) 1538 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1535,7 +1540,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1535 * needs to be adjusted to also track the ggtt batch vma properly as 1540 * needs to be adjusted to also track the ggtt batch vma properly as
1536 * active. 1541 * active.
1537 */ 1542 */
1538 if (flags & I915_DISPATCH_SECURE) 1543 if (dispatch_flags & I915_DISPATCH_SECURE)
1539 i915_gem_object_ggtt_unpin(batch_obj); 1544 i915_gem_object_ggtt_unpin(batch_obj);
1540err: 1545err:
1541 /* the request owns the ref now */ 1546 /* the request owns the ref now */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dccdc8aad2e2..74df3d1581dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -27,6 +27,7 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/i915_drm.h> 28#include <drm/i915_drm.h>
29#include "i915_drv.h" 29#include "i915_drv.h"
30#include "i915_vgpu.h"
30#include "i915_trace.h" 31#include "i915_trace.h"
31#include "intel_drv.h" 32#include "intel_drv.h"
32 33
@@ -103,6 +104,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
103 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; 104 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
104 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; 105 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
105 106
107 if (intel_vgpu_active(dev))
108 has_full_ppgtt = false; /* emulation is too hard */
109
106 /* 110 /*
107 * We don't allow disabling PPGTT for gen9+ as it's a requirement for 111 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
108 * execlists, the sole mechanism available to submit work. 112 * execlists, the sole mechanism available to submit work.
@@ -138,7 +142,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
138 return has_aliasing_ppgtt ? 1 : 0; 142 return has_aliasing_ppgtt ? 1 : 0;
139} 143}
140 144
141
142static void ppgtt_bind_vma(struct i915_vma *vma, 145static void ppgtt_bind_vma(struct i915_vma *vma,
143 enum i915_cache_level cache_level, 146 enum i915_cache_level cache_level,
144 u32 flags); 147 u32 flags);
@@ -275,6 +278,100 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
275 return pte; 278 return pte;
276} 279}
277 280
281static void unmap_and_free_pt(struct i915_page_table_entry *pt, struct drm_device *dev)
282{
283 if (WARN_ON(!pt->page))
284 return;
285 __free_page(pt->page);
286 kfree(pt);
287}
288
289static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
290{
291 struct i915_page_table_entry *pt;
292
293 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
294 if (!pt)
295 return ERR_PTR(-ENOMEM);
296
297 pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
298 if (!pt->page) {
299 kfree(pt);
300 return ERR_PTR(-ENOMEM);
301 }
302
303 return pt;
304}
305
306/**
307 * alloc_pt_range() - Allocate multiple page tables
308 * @pd: The page directory which will have at least @count entries
309 * available to point to the allocated page tables.
310 * @pde: First page directory entry for which we are allocating.
311 * @count: Number of pages to allocate.
312 * @dev: DRM device.
313 *
314 * Allocates multiple page table pages and sets the appropriate entries in the
315 * page table structure within the page directory. Function cleans up after
316 * itself on any failures.
317 *
318 * Return: 0 if allocation succeeded.
319 */
320static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
321 struct drm_device *dev)
322{
323 int i, ret;
324
325 /* 512 is the max page tables per page_directory on any platform. */
326 if (WARN_ON(pde + count > GEN6_PPGTT_PD_ENTRIES))
327 return -EINVAL;
328
329 for (i = pde; i < pde + count; i++) {
330 struct i915_page_table_entry *pt = alloc_pt_single(dev);
331
332 if (IS_ERR(pt)) {
333 ret = PTR_ERR(pt);
334 goto err_out;
335 }
336 WARN(pd->page_table[i],
337 "Leaking page directory entry %d (%p)\n",
338 i, pd->page_table[i]);
339 pd->page_table[i] = pt;
340 }
341
342 return 0;
343
344err_out:
345 while (i-- > pde)
346 unmap_and_free_pt(pd->page_table[i], dev);
347 return ret;
348}
349
350static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
351{
352 if (pd->page) {
353 __free_page(pd->page);
354 kfree(pd);
355 }
356}
357
358static struct i915_page_directory_entry *alloc_pd_single(void)
359{
360 struct i915_page_directory_entry *pd;
361
362 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
363 if (!pd)
364 return ERR_PTR(-ENOMEM);
365
366 pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
367 if (!pd->page) {
368 kfree(pd);
369 return ERR_PTR(-ENOMEM);
370 }
371
372 return pd;
373}
374
278/* Broadwell Page Directory Pointer Descriptors */ 375/* Broadwell Page Directory Pointer Descriptors */
279static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry, 376static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
280 uint64_t val) 377 uint64_t val)
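
[annotation] The new allocators follow a strict ownership rule: alloc_pt_single() and alloc_pd_single() return ERR_PTR() on failure, and alloc_pt_range() unwinds its own partial work, so a caller only frees what a successful call handed back. A hedged caller sketch using only the helpers added above:

        struct i915_page_directory_entry *pd;
        int ret;

        pd = alloc_pd_single();
        if (IS_ERR(pd))
                return PTR_ERR(pd);

        ret = alloc_pt_range(pd, 0, GEN8_PDES_PER_PAGE, dev);
        if (ret) {
                /* alloc_pt_range() freed any tables it had created */
                unmap_and_free_pd(pd);
                return ret;
        }
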
@@ -307,7 +404,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
307 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; 404 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
308 405
309 for (i = used_pd - 1; i >= 0; i--) { 406 for (i = used_pd - 1; i >= 0; i--) {
310 dma_addr_t addr = ppgtt->pd_dma_addr[i]; 407 dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
311 ret = gen8_write_pdp(ring, i, addr); 408 ret = gen8_write_pdp(ring, i, addr);
312 if (ret) 409 if (ret)
313 return ret; 410 return ret;
@@ -334,7 +431,24 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
334 I915_CACHE_LLC, use_scratch); 431 I915_CACHE_LLC, use_scratch);
335 432
336 while (num_entries) { 433 while (num_entries) {
337 struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde]; 434 struct i915_page_directory_entry *pd;
435 struct i915_page_table_entry *pt;
436 struct page *page_table;
437
438 if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
439 continue;
440
441 pd = ppgtt->pdp.page_directory[pdpe];
442
443 if (WARN_ON(!pd->page_table[pde]))
444 continue;
445
446 pt = pd->page_table[pde];
447
448 if (WARN_ON(!pt->page))
449 continue;
450
451 page_table = pt->page;
338 452
339 last_pte = pte + num_entries; 453 last_pte = pte + num_entries;
340 if (last_pte > GEN8_PTES_PER_PAGE) 454 if (last_pte > GEN8_PTES_PER_PAGE)
@@ -375,11 +489,16 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
375 pt_vaddr = NULL; 489 pt_vaddr = NULL;
376 490
377 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { 491 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
378 if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS)) 492 if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
379 break; 493 break;
380 494
381 if (pt_vaddr == NULL) 495 if (pt_vaddr == NULL) {
382 pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]); 496 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
497 struct i915_page_table_entry *pt = pd->page_table[pde];
498 struct page *page_table = pt->page;
499
500 pt_vaddr = kmap_atomic(page_table);
501 }
383 502
384 pt_vaddr[pte] = 503 pt_vaddr[pte] =
385 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), 504 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -403,29 +522,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
403 } 522 }
404} 523}
405 524
406static void gen8_free_page_tables(struct page **pt_pages) 525static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
407{ 526{
408 int i; 527 int i;
409 528
410 if (pt_pages == NULL) 529 if (!pd->page)
411 return; 530 return;
412 531
413 for (i = 0; i < GEN8_PDES_PER_PAGE; i++) 532 for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
414 if (pt_pages[i]) 533 if (WARN_ON(!pd->page_table[i]))
415 __free_pages(pt_pages[i], 0); 534 continue;
535
536 unmap_and_free_pt(pd->page_table[i], dev);
537 pd->page_table[i] = NULL;
538 }
416} 539}
417 540
418static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt) 541static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
419{ 542{
420 int i; 543 int i;
421 544
422 for (i = 0; i < ppgtt->num_pd_pages; i++) { 545 for (i = 0; i < ppgtt->num_pd_pages; i++) {
423 gen8_free_page_tables(ppgtt->gen8_pt_pages[i]); 546 if (WARN_ON(!ppgtt->pdp.page_directory[i]))
424 kfree(ppgtt->gen8_pt_pages[i]); 547 continue;
425 kfree(ppgtt->gen8_pt_dma_addr[i]);
426 }
427 548
428 __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); 549 gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
550 unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
551 }
429} 552}
430 553
431static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) 554static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -436,14 +559,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
436 for (i = 0; i < ppgtt->num_pd_pages; i++) { 559 for (i = 0; i < ppgtt->num_pd_pages; i++) {
437 /* TODO: In the future we'll support sparse mappings, so this 560 /* TODO: In the future we'll support sparse mappings, so this
438 * will have to change. */ 561 * will have to change. */
439 if (!ppgtt->pd_dma_addr[i]) 562 if (!ppgtt->pdp.page_directory[i]->daddr)
440 continue; 563 continue;
441 564
442 pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE, 565 pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
443 PCI_DMA_BIDIRECTIONAL); 566 PCI_DMA_BIDIRECTIONAL);
444 567
445 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 568 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
446 dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; 569 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
570 struct i915_page_table_entry *pt;
571 dma_addr_t addr;
572
573 if (WARN_ON(!pd->page_table[j]))
574 continue;
575
576 pt = pd->page_table[j];
577 addr = pt->daddr;
578
447 if (addr) 579 if (addr)
448 pci_unmap_page(hwdev, addr, PAGE_SIZE, 580 pci_unmap_page(hwdev, addr, PAGE_SIZE,
449 PCI_DMA_BIDIRECTIONAL); 581 PCI_DMA_BIDIRECTIONAL);
@@ -460,86 +592,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
460 gen8_ppgtt_free(ppgtt); 592 gen8_ppgtt_free(ppgtt);
461} 593}
462 594
463static struct page **__gen8_alloc_page_tables(void) 595static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
464{ 596{
465 struct page **pt_pages;
466 int i;
467
468 pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
469 if (!pt_pages)
470 return ERR_PTR(-ENOMEM);
471
472 for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
473 pt_pages[i] = alloc_page(GFP_KERNEL);
474 if (!pt_pages[i])
475 goto bail;
476 }
477
478 return pt_pages;
479
480bail:
481 gen8_free_page_tables(pt_pages);
482 kfree(pt_pages);
483 return ERR_PTR(-ENOMEM);
484}
485
486static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
487 const int max_pdp)
488{
489 struct page **pt_pages[GEN8_LEGACY_PDPS];
490 int i, ret; 597 int i, ret;
491 598
492 for (i = 0; i < max_pdp; i++) { 599 for (i = 0; i < ppgtt->num_pd_pages; i++) {
493 pt_pages[i] = __gen8_alloc_page_tables(); 600 ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
494 if (IS_ERR(pt_pages[i])) { 601 0, GEN8_PDES_PER_PAGE, ppgtt->base.dev);
495 ret = PTR_ERR(pt_pages[i]); 602 if (ret)
496 goto unwind_out; 603 goto unwind_out;
497 }
498 } 604 }
499 605
500 /* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
501 * "atomic" - for cleanup purposes.
502 */
503 for (i = 0; i < max_pdp; i++)
504 ppgtt->gen8_pt_pages[i] = pt_pages[i];
505
506 return 0; 606 return 0;
507 607
508unwind_out: 608unwind_out:
509 while (i--) { 609 while (i--)
510 gen8_free_page_tables(pt_pages[i]); 610 gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
511 kfree(pt_pages[i]);
512 }
513 611
514 return ret; 612 return -ENOMEM;
515} 613}
516 614
517static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt) 615static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
616 const int max_pdp)
518{ 617{
519 int i; 618 int i;
520 619
521 for (i = 0; i < ppgtt->num_pd_pages; i++) { 620 for (i = 0; i < max_pdp; i++) {
522 ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE, 621 ppgtt->pdp.page_directory[i] = alloc_pd_single();
523 sizeof(dma_addr_t), 622 if (IS_ERR(ppgtt->pdp.page_directory[i]))
524 GFP_KERNEL); 623 goto unwind_out;
525 if (!ppgtt->gen8_pt_dma_addr[i])
526 return -ENOMEM;
527 } 624 }
528 625
529 return 0; 626 ppgtt->num_pd_pages = max_pdp;
530} 627 BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
531 628
532static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt, 629 return 0;
533 const int max_pdp)
534{
535 ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
536 if (!ppgtt->pd_pages)
537 return -ENOMEM;
538 630
539 ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); 631unwind_out:
540 BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); 632 while (i--)
633 unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
541 634
542 return 0; 635 return -ENOMEM;
543} 636}
544 637
545static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt, 638static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
@@ -551,18 +644,16 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
551 if (ret) 644 if (ret)
552 return ret; 645 return ret;
553 646
554 ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp); 647 ret = gen8_ppgtt_allocate_page_tables(ppgtt);
555 if (ret) { 648 if (ret)
556 __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT)); 649 goto err_out;
557 return ret;
558 }
559 650
560 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; 651 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
561 652
562 ret = gen8_ppgtt_allocate_dma(ppgtt); 653 return 0;
563 if (ret)
564 gen8_ppgtt_free(ppgtt);
565 654
655err_out:
656 gen8_ppgtt_free(ppgtt);
566 return ret; 657 return ret;
567} 658}
568 659
@@ -573,14 +664,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
573 int ret; 664 int ret;
574 665
575 pd_addr = pci_map_page(ppgtt->base.dev->pdev, 666 pd_addr = pci_map_page(ppgtt->base.dev->pdev,
576 &ppgtt->pd_pages[pd], 0, 667 ppgtt->pdp.page_directory[pd]->page, 0,
577 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 668 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
578 669
579 ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr); 670 ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
580 if (ret) 671 if (ret)
581 return ret; 672 return ret;
582 673
583 ppgtt->pd_dma_addr[pd] = pd_addr; 674 ppgtt->pdp.page_directory[pd]->daddr = pd_addr;
584 675
585 return 0; 676 return 0;
586} 677}
@@ -590,17 +681,18 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
590 const int pt) 681 const int pt)
591{ 682{
592 dma_addr_t pt_addr; 683 dma_addr_t pt_addr;
593 struct page *p; 684 struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
685 struct i915_page_table_entry *ptab = pdir->page_table[pt];
686 struct page *p = ptab->page;
594 int ret; 687 int ret;
595 688
596 p = ppgtt->gen8_pt_pages[pd][pt];
597 pt_addr = pci_map_page(ppgtt->base.dev->pdev, 689 pt_addr = pci_map_page(ppgtt->base.dev->pdev,
598 p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 690 p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
599 ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr); 691 ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
600 if (ret) 692 if (ret)
601 return ret; 693 return ret;
602 694
603 ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr; 695 ptab->daddr = pt_addr;
604 696
605 return 0; 697 return 0;
606} 698}
@@ -653,10 +745,12 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
653 * will never need to touch the PDEs again. 745 * will never need to touch the PDEs again.
654 */ 746 */
655 for (i = 0; i < max_pdp; i++) { 747 for (i = 0; i < max_pdp; i++) {
748 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
656 gen8_ppgtt_pde_t *pd_vaddr; 749 gen8_ppgtt_pde_t *pd_vaddr;
657 pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]); 750 pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
658 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { 751 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
659 dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; 752 struct i915_page_table_entry *pt = pd->page_table[j];
753 dma_addr_t addr = pt->daddr;
660 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, 754 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
661 I915_CACHE_LLC); 755 I915_CACHE_LLC);
662 } 756 }
@@ -699,14 +793,15 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
699 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); 793 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
700 794
701 pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + 795 pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
702 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); 796 ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
703 797
704 seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm, 798 seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
705 ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries); 799 ppgtt->pd.pd_offset,
800 ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
706 for (pde = 0; pde < ppgtt->num_pd_entries; pde++) { 801 for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
707 u32 expected; 802 u32 expected;
708 gen6_gtt_pte_t *pt_vaddr; 803 gen6_gtt_pte_t *pt_vaddr;
709 dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde]; 804 dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
710 pd_entry = readl(pd_addr + pde); 805 pd_entry = readl(pd_addr + pde);
711 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); 806 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
712 807
@@ -717,7 +812,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
717 expected); 812 expected);
718 seq_printf(m, "\tPDE: %x\n", pd_entry); 813 seq_printf(m, "\tPDE: %x\n", pd_entry);
719 814
720 pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]); 815 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
721 for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) { 816 for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
722 unsigned long va = 817 unsigned long va =
723 (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) + 818 (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
@@ -750,13 +845,13 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
750 uint32_t pd_entry; 845 uint32_t pd_entry;
751 int i; 846 int i;
752 847
753 WARN_ON(ppgtt->pd_offset & 0x3f); 848 WARN_ON(ppgtt->pd.pd_offset & 0x3f);
754 pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + 849 pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
755 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); 850 ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
756 for (i = 0; i < ppgtt->num_pd_entries; i++) { 851 for (i = 0; i < ppgtt->num_pd_entries; i++) {
757 dma_addr_t pt_addr; 852 dma_addr_t pt_addr;
758 853
759 pt_addr = ppgtt->pt_dma_addr[i]; 854 pt_addr = ppgtt->pd.page_table[i]->daddr;
760 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); 855 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
761 pd_entry |= GEN6_PDE_VALID; 856 pd_entry |= GEN6_PDE_VALID;
762 857
@@ -767,9 +862,9 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
767 862
768static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) 863static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
769{ 864{
770 BUG_ON(ppgtt->pd_offset & 0x3f); 865 BUG_ON(ppgtt->pd.pd_offset & 0x3f);
771 866
772 return (ppgtt->pd_offset / 64) << 16; 867 return (ppgtt->pd.pd_offset / 64) << 16;
773} 868}
774 869
775static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 870static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -797,6 +892,16 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
797 return 0; 892 return 0;
798} 893}
799 894
895static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
896 struct intel_engine_cs *ring)
897{
898 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
899
900 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
901 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
902 return 0;
903}
904
800static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 905static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
801 struct intel_engine_cs *ring) 906 struct intel_engine_cs *ring)
802{ 907{
@@ -922,7 +1027,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
922 if (last_pte > I915_PPGTT_PT_ENTRIES) 1027 if (last_pte > I915_PPGTT_PT_ENTRIES)
923 last_pte = I915_PPGTT_PT_ENTRIES; 1028 last_pte = I915_PPGTT_PT_ENTRIES;
924 1029
925 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); 1030 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
926 1031
927 for (i = first_pte; i < last_pte; i++) 1032 for (i = first_pte; i < last_pte; i++)
928 pt_vaddr[i] = scratch_pte; 1033 pt_vaddr[i] = scratch_pte;
@@ -951,7 +1056,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
951 pt_vaddr = NULL; 1056 pt_vaddr = NULL;
952 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { 1057 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
953 if (pt_vaddr == NULL) 1058 if (pt_vaddr == NULL)
954 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); 1059 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
955 1060
956 pt_vaddr[act_pte] = 1061 pt_vaddr[act_pte] =
957 vm->pte_encode(sg_page_iter_dma_address(&sg_iter), 1062 vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -972,22 +1077,20 @@ static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
972{ 1077{
973 int i; 1078 int i;
974 1079
975 if (ppgtt->pt_dma_addr) { 1080 for (i = 0; i < ppgtt->num_pd_entries; i++)
976 for (i = 0; i < ppgtt->num_pd_entries; i++) 1081 pci_unmap_page(ppgtt->base.dev->pdev,
977 pci_unmap_page(ppgtt->base.dev->pdev, 1082 ppgtt->pd.page_table[i]->daddr,
978 ppgtt->pt_dma_addr[i], 1083 4096, PCI_DMA_BIDIRECTIONAL);
979 4096, PCI_DMA_BIDIRECTIONAL);
980 }
981} 1084}
982 1085
983static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) 1086static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
984{ 1087{
985 int i; 1088 int i;
986 1089
987 kfree(ppgtt->pt_dma_addr);
988 for (i = 0; i < ppgtt->num_pd_entries; i++) 1090 for (i = 0; i < ppgtt->num_pd_entries; i++)
989 __free_page(ppgtt->pt_pages[i]); 1091 unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
990 kfree(ppgtt->pt_pages); 1092
1093 unmap_and_free_pd(&ppgtt->pd);
991} 1094}
992 1095
993static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 1096static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1032,31 +1135,13 @@ alloc:
1032 goto alloc; 1135 goto alloc;
1033 } 1136 }
1034 1137
1138 if (ret)
1139 return ret;
1140
1035 if (ppgtt->node.start < dev_priv->gtt.mappable_end) 1141 if (ppgtt->node.start < dev_priv->gtt.mappable_end)
1036 DRM_DEBUG("Forced to use aperture for PDEs\n"); 1142 DRM_DEBUG("Forced to use aperture for PDEs\n");
1037 1143
1038 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; 1144 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
1039 return ret;
1040}
1041
1042static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
1043{
1044 int i;
1045
1046 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
1047 GFP_KERNEL);
1048
1049 if (!ppgtt->pt_pages)
1050 return -ENOMEM;
1051
1052 for (i = 0; i < ppgtt->num_pd_entries; i++) {
1053 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
1054 if (!ppgtt->pt_pages[i]) {
1055 gen6_ppgtt_free(ppgtt);
1056 return -ENOMEM;
1057 }
1058 }
1059
1060 return 0; 1145 return 0;
1061} 1146}
1062 1147
@@ -1068,20 +1153,14 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1068 if (ret) 1153 if (ret)
1069 return ret; 1154 return ret;
1070 1155
1071 ret = gen6_ppgtt_allocate_page_tables(ppgtt); 1156 ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
1157 ppgtt->base.dev);
1158
1072 if (ret) { 1159 if (ret) {
1073 drm_mm_remove_node(&ppgtt->node); 1160 drm_mm_remove_node(&ppgtt->node);
1074 return ret; 1161 return ret;
1075 } 1162 }
1076 1163
1077 ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
1078 GFP_KERNEL);
1079 if (!ppgtt->pt_dma_addr) {
1080 drm_mm_remove_node(&ppgtt->node);
1081 gen6_ppgtt_free(ppgtt);
1082 return -ENOMEM;
1083 }
1084
1085 return 0; 1164 return 0;
1086} 1165}
1087 1166
@@ -1091,9 +1170,11 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
1091 int i; 1170 int i;
1092 1171
1093 for (i = 0; i < ppgtt->num_pd_entries; i++) { 1172 for (i = 0; i < ppgtt->num_pd_entries; i++) {
1173 struct page *page;
1094 dma_addr_t pt_addr; 1174 dma_addr_t pt_addr;
1095 1175
1096 pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096, 1176 page = ppgtt->pd.page_table[i]->page;
1177 pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
1097 PCI_DMA_BIDIRECTIONAL); 1178 PCI_DMA_BIDIRECTIONAL);
1098 1179
1099 if (pci_dma_mapping_error(dev->pdev, pt_addr)) { 1180 if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
@@ -1101,7 +1182,7 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
1101 return -EIO; 1182 return -EIO;
1102 } 1183 }
1103 1184
1104 ppgtt->pt_dma_addr[i] = pt_addr; 1185 ppgtt->pd.page_table[i]->daddr = pt_addr;
1105 } 1186 }
1106 1187
1107 return 0; 1188 return 0;
@@ -1123,6 +1204,9 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1123 } else 1204 } else
1124 BUG(); 1205 BUG();
1125 1206
1207 if (intel_vgpu_active(dev))
1208 ppgtt->switch_mm = vgpu_mm_switch;
1209
1126 ret = gen6_ppgtt_alloc(ppgtt); 1210 ret = gen6_ppgtt_alloc(ppgtt);
1127 if (ret) 1211 if (ret)
1128 return ret; 1212 return ret;
@@ -1137,10 +1221,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1137 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 1221 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1138 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 1222 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1139 ppgtt->base.start = 0; 1223 ppgtt->base.start = 0;
1140 ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; 1224 ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
1141 ppgtt->debug_dump = gen6_dump_ppgtt; 1225 ppgtt->debug_dump = gen6_dump_ppgtt;
1142 1226
1143 ppgtt->pd_offset = 1227 ppgtt->pd.pd_offset =
1144 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t); 1228 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
1145 1229
1146 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); 1230 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
@@ -1151,7 +1235,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1151 1235
1152 gen6_write_pdes(ppgtt); 1236 gen6_write_pdes(ppgtt);
1153 DRM_DEBUG("Adding PPGTT at offset %x\n", 1237 DRM_DEBUG("Adding PPGTT at offset %x\n",
1154 ppgtt->pd_offset << 10); 1238 ppgtt->pd.pd_offset << 10);
1155 1239
1156 return 0; 1240 return 0;
1157} 1241}
@@ -1753,6 +1837,16 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
1753 1837
1754 /* Subtract the guard page ... */ 1838 /* Subtract the guard page ... */
1755 drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE); 1839 drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
1840
1841 dev_priv->gtt.base.start = start;
1842 dev_priv->gtt.base.total = end - start;
1843
1844 if (intel_vgpu_active(dev)) {
1845 ret = intel_vgt_balloon(dev);
1846 if (ret)
1847 return ret;
1848 }
1849
1756 if (!HAS_LLC(dev)) 1850 if (!HAS_LLC(dev))
1757 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; 1851 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
1758 1852
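
[annotation] "Ballooning" reserves the GGTT ranges owned by the host (and by other guests) in the guest's drm_mm so this driver never allocates from them. The bookkeeping lives in the new i915_vgpu.c, but per reserved range it presumably reduces to something like this; the node and range names are illustrative:

        /* illustrative: mark one host-owned GGTT range as occupied */
        node->start = reserved_start;   /* read from the PV info page */
        node->size = reserved_size;
        ret = drm_mm_reserve_node(&ggtt_vm->mm, node);

This also explains the placement: the balloon goes in after drm_mm_init() but before any guest allocations, and is torn down via intel_vgt_deballoon() in i915_global_gtt_cleanup() below.
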
@@ -1772,9 +1866,6 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
1772 vma->bound |= GLOBAL_BIND; 1866 vma->bound |= GLOBAL_BIND;
1773 } 1867 }
1774 1868
1775 dev_priv->gtt.base.start = start;
1776 dev_priv->gtt.base.total = end - start;
1777
1778 /* Clear any non-preallocated blocks */ 1869 /* Clear any non-preallocated blocks */
1779 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { 1870 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
1780 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 1871 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -1826,6 +1917,9 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
1826 } 1917 }
1827 1918
1828 if (drm_mm_initialized(&vm->mm)) { 1919 if (drm_mm_initialized(&vm->mm)) {
1920 if (intel_vgpu_active(dev))
1921 intel_vgt_deballoon();
1922
1829 drm_mm_takedown(&vm->mm); 1923 drm_mm_takedown(&vm->mm);
1830 list_del(&vm->global_link); 1924 list_del(&vm->global_link);
1831 } 1925 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index e377c7d27bd4..c9e93f5070bc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -88,7 +88,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
88#define GEN8_PDE_MASK 0x1ff 88#define GEN8_PDE_MASK 0x1ff
89#define GEN8_PTE_SHIFT 12 89#define GEN8_PTE_SHIFT 12
90#define GEN8_PTE_MASK 0x1ff 90#define GEN8_PTE_MASK 0x1ff
91#define GEN8_LEGACY_PDPS 4 91#define GEN8_LEGACY_PDPES 4
92#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) 92#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
93#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) 93#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
94 94
@@ -187,6 +187,26 @@ struct i915_vma {
187 u32 flags); 187 u32 flags);
188}; 188};
189 189
190struct i915_page_table_entry {
191 struct page *page;
192 dma_addr_t daddr;
193};
194
195struct i915_page_directory_entry {
196 struct page *page; /* NULL for GEN6-GEN7 */
197 union {
198 uint32_t pd_offset;
199 dma_addr_t daddr;
200 };
201
202 struct i915_page_table_entry *page_table[GEN6_PPGTT_PD_ENTRIES]; /* PDEs */
203};
204
205struct i915_page_directory_pointer_entry {
206 /* struct page *page; */
207 struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
208};
209
190struct i915_address_space { 210struct i915_address_space {
191 struct drm_mm mm; 211 struct drm_mm mm;
192 struct drm_device *dev; 212 struct drm_device *dev;
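
[annotation] The three structs above mirror the gen8 three-level walk: a page directory pointer holds up to GEN8_LEGACY_PDPES directories, each directory holds 512 tables, and each table holds 512 PTEs. Using the masks defined earlier in this header (the 21- and 30-bit shifts are assumed from the 512-entry levels; they are not shown in this hunk), a GPU virtual address resolves as:

        uint32_t pdpe = (addr >> 30) & (GEN8_LEGACY_PDPES - 1);
        uint32_t pde  = (addr >> 21) & GEN8_PDE_MASK;
        uint32_t pte  = (addr >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK;

        struct page *backing =
                ppgtt->pdp.page_directory[pdpe]->page_table[pde]->page;
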
@@ -272,17 +292,8 @@ struct i915_hw_ppgtt {
272 unsigned num_pd_entries; 292 unsigned num_pd_entries;
273 unsigned num_pd_pages; /* gen8+ */ 293 unsigned num_pd_pages; /* gen8+ */
274 union { 294 union {
275 struct page **pt_pages; 295 struct i915_page_directory_pointer_entry pdp;
276 struct page **gen8_pt_pages[GEN8_LEGACY_PDPS]; 296 struct i915_page_directory_entry pd;
277 };
278 struct page *pd_pages;
279 union {
280 uint32_t pd_offset;
281 dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
282 };
283 union {
284 dma_addr_t *pt_dma_addr;
285 dma_addr_t *gen8_pt_dma_addr[4];
286 }; 297 };
287 298
288 struct drm_i915_file_private *file_priv; 299 struct drm_i915_file_private *file_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9c6f93ec886b..f8da71682c96 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -231,7 +231,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
231 dev_priv->mm.stolen_base + compressed_llb->start); 231 dev_priv->mm.stolen_base + compressed_llb->start);
232 } 232 }
233 233
234 dev_priv->fbc.size = size / dev_priv->fbc.threshold; 234 dev_priv->fbc.uncompressed_size = size;
235 235
236 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", 236 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
237 size); 237 size);
@@ -253,7 +253,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_c
253 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 253 if (!drm_mm_initialized(&dev_priv->mm.stolen))
254 return -ENODEV; 254 return -ENODEV;
255 255
256 if (size < dev_priv->fbc.size) 256 if (size <= dev_priv->fbc.uncompressed_size)
257 return 0; 257 return 0;
258 258
259 /* Release any current block */ 259 /* Release any current block */
@@ -266,7 +266,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
266{ 266{
267 struct drm_i915_private *dev_priv = dev->dev_private; 267 struct drm_i915_private *dev_priv = dev->dev_private;
268 268
269 if (dev_priv->fbc.size == 0) 269 if (dev_priv->fbc.uncompressed_size == 0)
270 return; 270 return;
271 271
272 drm_mm_remove_node(&dev_priv->fbc.compressed_fb); 272 drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
@@ -276,7 +276,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
276 kfree(dev_priv->fbc.compressed_llb); 276 kfree(dev_priv->fbc.compressed_llb);
277 } 277 }
278 278
279 dev_priv->fbc.size = 0; 279 dev_priv->fbc.uncompressed_size = 0;
280} 280}
281 281
282void i915_gem_cleanup_stolen(struct drm_device *dev) 282void i915_gem_cleanup_stolen(struct drm_device *dev)
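
[annotation] The rename is a units fix as much as a rename: the old code cached the compressed size (size / threshold) yet compared it against the caller's uncompressed request, so the early-out in i915_gem_stolen_setup_compression() mixed units. With the uncompressed size cached, both sides of the check agree:

        /* before (mixed units): uncompressed request vs compressed cache */
        if (size < dev_priv->fbc.size)
                return 0;

        /* after (both sides in uncompressed bytes) */
        if (size <= dev_priv->fbc.uncompressed_size)
                return 0;
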
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 48ddbf44c862..a982849a5edd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -994,12 +994,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
994 i915_error_ggtt_object_create(dev_priv, 994 i915_error_ggtt_object_create(dev_priv,
995 ring->scratch.obj); 995 ring->scratch.obj);
996 996
997 if (request->file_priv) { 997 if (request->pid) {
998 struct task_struct *task; 998 struct task_struct *task;
999 999
1000 rcu_read_lock(); 1000 rcu_read_lock();
1001 task = pid_task(request->file_priv->file->pid, 1001 task = pid_task(request->pid, PIDTYPE_PID);
1002 PIDTYPE_PID);
1003 if (task) { 1002 if (task) {
1004 strcpy(error->ring[i].comm, task->comm); 1003 strcpy(error->ring[i].comm, task->comm);
1005 error->ring[i].pid = task->pid; 1004 error->ring[i].pid = task->pid;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ede5bbbd8a08..9baecb79de8c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -492,31 +492,6 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
492 spin_unlock_irq(&dev_priv->irq_lock); 492 spin_unlock_irq(&dev_priv->irq_lock);
493} 493}
494 494
495/**
496 * i915_pipe_enabled - check if a pipe is enabled
497 * @dev: DRM device
498 * @pipe: pipe to check
499 *
500 * Reading certain registers when the pipe is disabled can hang the chip.
501 * Use this routine to make sure the PLL is running and the pipe is active
502 * before reading such registers if unsure.
503 */
504static int
505i915_pipe_enabled(struct drm_device *dev, int pipe)
506{
507 struct drm_i915_private *dev_priv = dev->dev_private;
508
509 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
510 /* Locking is horribly broken here, but whatever. */
511 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
512 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
513
514 return intel_crtc->active;
515 } else {
516 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
517 }
518}
519
520/* 495/*
521 * This timing diagram depicts the video signal in and 496 * This timing diagram depicts the video signal in and
522 * around the vertical blanking period. 497 * around the vertical blanking period.
@@ -582,34 +557,16 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
582 unsigned long high_frame; 557 unsigned long high_frame;
583 unsigned long low_frame; 558 unsigned long low_frame;
584 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 559 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
560 struct intel_crtc *intel_crtc =
561 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
562 const struct drm_display_mode *mode =
563 &intel_crtc->config->base.adjusted_mode;
585 564
586 if (!i915_pipe_enabled(dev, pipe)) { 565 htotal = mode->crtc_htotal;
587 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 566 hsync_start = mode->crtc_hsync_start;
588 "pipe %c\n", pipe_name(pipe)); 567 vbl_start = mode->crtc_vblank_start;
589 return 0; 568 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
590 } 569 vbl_start = DIV_ROUND_UP(vbl_start, 2);
591
592 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
593 struct intel_crtc *intel_crtc =
594 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
595 const struct drm_display_mode *mode =
596 &intel_crtc->config->base.adjusted_mode;
597
598 htotal = mode->crtc_htotal;
599 hsync_start = mode->crtc_hsync_start;
600 vbl_start = mode->crtc_vblank_start;
601 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
602 vbl_start = DIV_ROUND_UP(vbl_start, 2);
603 } else {
604 enum transcoder cpu_transcoder = (enum transcoder) pipe;
605
606 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
607 hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
608 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
609 if ((I915_READ(PIPECONF(cpu_transcoder)) &
610 PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
611 vbl_start = DIV_ROUND_UP(vbl_start, 2);
612 }
613 570
614 /* Convert to pixel count */ 571 /* Convert to pixel count */
615 vbl_start *= htotal; 572 vbl_start *= htotal;
@@ -648,12 +605,6 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
648 struct drm_i915_private *dev_priv = dev->dev_private; 605 struct drm_i915_private *dev_priv = dev->dev_private;
649 int reg = PIPE_FRMCOUNT_GM45(pipe); 606 int reg = PIPE_FRMCOUNT_GM45(pipe);
650 607
651 if (!i915_pipe_enabled(dev, pipe)) {
652 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
653 "pipe %c\n", pipe_name(pipe));
654 return 0;
655 }
656
657 return I915_READ(reg); 608 return I915_READ(reg);
658} 609}
659 610
@@ -840,7 +791,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
840 return -EINVAL; 791 return -EINVAL;
841 } 792 }
842 793
843 if (!crtc->enabled) { 794 if (!crtc->state->enable) {
844 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 795 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
845 return -EBUSY; 796 return -EBUSY;
846 } 797 }
@@ -1243,10 +1194,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
1243 1194
1244 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq; 1195 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1245 1196
1246 if (IS_VALLEYVIEW(dev_priv->dev)) 1197 intel_set_rps(dev_priv->dev, new_delay);
1247 valleyview_set_rps(dev_priv->dev, new_delay);
1248 else
1249 gen6_set_rps(dev_priv->dev, new_delay);
1250 1198
1251 mutex_unlock(&dev_priv->rps.hw_lock); 1199 mutex_unlock(&dev_priv->rps.hw_lock);
1252} 1200}
@@ -2662,9 +2610,6 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
2662 struct drm_i915_private *dev_priv = dev->dev_private; 2610 struct drm_i915_private *dev_priv = dev->dev_private;
2663 unsigned long irqflags; 2611 unsigned long irqflags;
2664 2612
2665 if (!i915_pipe_enabled(dev, pipe))
2666 return -EINVAL;
2667
2668 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2613 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2669 if (INTEL_INFO(dev)->gen >= 4) 2614 if (INTEL_INFO(dev)->gen >= 4)
2670 i915_enable_pipestat(dev_priv, pipe, 2615 i915_enable_pipestat(dev_priv, pipe,
@@ -2684,9 +2629,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2684 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2629 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2685 DE_PIPE_VBLANK(pipe); 2630 DE_PIPE_VBLANK(pipe);
2686 2631
2687 if (!i915_pipe_enabled(dev, pipe))
2688 return -EINVAL;
2689
2690 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2632 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2691 ironlake_enable_display_irq(dev_priv, bit); 2633 ironlake_enable_display_irq(dev_priv, bit);
2692 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2634 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2699,9 +2641,6 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2699 struct drm_i915_private *dev_priv = dev->dev_private; 2641 struct drm_i915_private *dev_priv = dev->dev_private;
2700 unsigned long irqflags; 2642 unsigned long irqflags;
2701 2643
2702 if (!i915_pipe_enabled(dev, pipe))
2703 return -EINVAL;
2704
2705 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2644 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2706 i915_enable_pipestat(dev_priv, pipe, 2645 i915_enable_pipestat(dev_priv, pipe,
2707 PIPE_START_VBLANK_INTERRUPT_STATUS); 2646 PIPE_START_VBLANK_INTERRUPT_STATUS);
@@ -2715,9 +2654,6 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2715 struct drm_i915_private *dev_priv = dev->dev_private; 2654 struct drm_i915_private *dev_priv = dev->dev_private;
2716 unsigned long irqflags; 2655 unsigned long irqflags;
2717 2656
2718 if (!i915_pipe_enabled(dev, pipe))
2719 return -EINVAL;
2720
2721 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2657 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2722 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2658 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2723 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2659 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -2769,9 +2705,6 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2769 struct drm_i915_private *dev_priv = dev->dev_private; 2705 struct drm_i915_private *dev_priv = dev->dev_private;
2770 unsigned long irqflags; 2706 unsigned long irqflags;
2771 2707
2772 if (!i915_pipe_enabled(dev, pipe))
2773 return;
2774
2775 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2708 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2776 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2709 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2777 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2710 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -4392,10 +4325,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4392 if (!IS_GEN2(dev_priv)) 4325 if (!IS_GEN2(dev_priv))
4393 dev->vblank_disable_immediate = true; 4326 dev->vblank_disable_immediate = true;
4394 4327
4395 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4328 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4396 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4329 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4397 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4398 }
4399 4330
4400 if (IS_CHERRYVIEW(dev_priv)) { 4331 if (IS_CHERRYVIEW(dev_priv)) {
4401 dev->driver->irq_handler = cherryview_irq_handler; 4332 dev->driver->irq_handler = cherryview_irq_handler;
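The i915_get_vblank_counter() hunks above keep the high1/high2/low register triple. A hedged sketch of the technique those variables implement: the high half of a split counter is read before and after the low half, and the pair is retried until both high reads agree, so a carry between the reads cannot produce a torn count. The "registers" below are a simulated 64-bit counter, and the 24-bit split is made up for illustration:

/* Stable read of a counter exposed as separate high/low registers. */
#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter;	/* stands in for the frame-counter MMIO */

static uint32_t read_high(void) { return (uint32_t)(hw_counter >> 24); }
static uint32_t read_low(void)  { return (uint32_t)(hw_counter & 0xffffff); }

static uint64_t read_frame_counter(void)
{
	uint32_t high1, high2, low;

	do {
		high1 = read_high();
		low = read_low();
		hw_counter++;		/* simulate the counter ticking */
		high2 = read_high();
	} while (high1 != high2);	/* retry across a carry */

	return ((uint64_t)high1 << 24) | low;
}

int main(void)
{
	hw_counter = 0xffffffull;	/* one tick away from a carry */
	printf("frame counter: 0x%llx\n",
	       (unsigned long long)read_frame_counter());
	return 0;
}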
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 33b3d0a24071..55143cb36e74 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,7 +139,21 @@
139#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) 139#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
140#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) 140#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
141 141
142#define GEN8_R_PWR_CLK_STATE 0x20C8
143#define GEN8_RPCS_ENABLE (1 << 31)
144#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
145#define GEN8_RPCS_S_CNT_SHIFT 15
146#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT)
147#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11)
148#define GEN8_RPCS_SS_CNT_SHIFT 8
149#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
150#define GEN8_RPCS_EU_MAX_SHIFT 4
151#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT)
152#define GEN8_RPCS_EU_MIN_SHIFT 0
153#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
154
142#define GAM_ECOCHK 0x4090 155#define GAM_ECOCHK 0x4090
156#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
143#define ECOCHK_SNB_BIT (1<<10) 157#define ECOCHK_SNB_BIT (1<<10)
144#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) 158#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
145#define ECOCHK_PPGTT_CACHE64B (0x3<<3) 159#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
@@ -586,6 +600,19 @@ enum punit_power_well {
586 PUNIT_POWER_WELL_NUM, 600 PUNIT_POWER_WELL_NUM,
587}; 601};
588 602
603enum skl_disp_power_wells {
604 SKL_DISP_PW_MISC_IO,
605 SKL_DISP_PW_DDI_A_E,
606 SKL_DISP_PW_DDI_B,
607 SKL_DISP_PW_DDI_C,
608 SKL_DISP_PW_DDI_D,
609 SKL_DISP_PW_1 = 14,
610 SKL_DISP_PW_2,
611};
612
613#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
614#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
615
589#define PUNIT_REG_PWRGT_CTRL 0x60 616#define PUNIT_REG_PWRGT_CTRL 0x60
590#define PUNIT_REG_PWRGT_STATUS 0x61 617#define PUNIT_REG_PWRGT_STATUS 0x61
591#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2)) 618#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
@@ -1011,6 +1038,16 @@ enum punit_power_well {
1011#define DPIO_CHV_PROP_COEFF_SHIFT 0 1038#define DPIO_CHV_PROP_COEFF_SHIFT 0
1012#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1) 1039#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
1013 1040
1041#define _CHV_PLL_DW8_CH0 0x8020
1042#define _CHV_PLL_DW8_CH1 0x81A0
1043#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
1044
1045#define _CHV_PLL_DW9_CH0 0x8024
1046#define _CHV_PLL_DW9_CH1 0x81A4
1047#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
1048#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
1049#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
1050
1014#define _CHV_CMN_DW5_CH0 0x8114 1051#define _CHV_CMN_DW5_CH0 0x8114
1015#define CHV_BUFRIGHTENA1_DISABLE (0 << 20) 1052#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
1016#define CHV_BUFRIGHTENA1_NORMAL (1 << 20) 1053#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
@@ -1314,6 +1351,8 @@ enum punit_power_well {
1314#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) 1351#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
1315#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1) 1352#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
1316#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) 1353#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
1354#define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2))
1355#define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2))
1317 1356
1318#define GFX_MODE 0x02520 1357#define GFX_MODE 0x02520
1319#define GFX_MODE_GEN7 0x0229c 1358#define GFX_MODE_GEN7 0x0229c
@@ -1470,6 +1509,7 @@ enum punit_power_well {
1470#define CACHE_MODE_1 0x7004 /* IVB+ */ 1509#define CACHE_MODE_1 0x7004 /* IVB+ */
1471#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 1510#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
1472#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6) 1511#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
1512#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
1473 1513
1474#define GEN6_BLITTER_ECOSKPD 0x221d0 1514#define GEN6_BLITTER_ECOSKPD 0x221d0
1475#define GEN6_BLITTER_LOCK_SHIFT 16 1515#define GEN6_BLITTER_LOCK_SHIFT 16
@@ -1491,6 +1531,17 @@ enum punit_power_well {
1491#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28 1531#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
1492#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT) 1532#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
1493 1533
1534#define GEN8_FUSE2 0x9120
1535#define GEN8_F2_S_ENA_SHIFT 25
1536#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
1537
1538#define GEN9_F2_SS_DIS_SHIFT 20
1539#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
1540
1541#define GEN8_EU_DISABLE0 0x9134
1542#define GEN8_EU_DISABLE1 0x9138
1543#define GEN8_EU_DISABLE2 0x913c
1544
1494#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 1545#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
1495#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) 1546#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
1496#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) 1547#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -3865,6 +3916,7 @@ enum punit_power_well {
3865#define PIPECONF_INTERLACE_MODE_MASK (7 << 21) 3916#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
3866#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20) 3917#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
3867#define PIPECONF_CXSR_DOWNCLOCK (1<<16) 3918#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
3919#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
3868#define PIPECONF_COLOR_RANGE_SELECT (1 << 13) 3920#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
3869#define PIPECONF_BPC_MASK (0x7 << 5) 3921#define PIPECONF_BPC_MASK (0x7 << 5)
3870#define PIPECONF_8BPC (0<<5) 3922#define PIPECONF_8BPC (0<<5)
@@ -5221,14 +5273,22 @@ enum punit_power_well {
5221#define HSW_NDE_RSTWRN_OPT 0x46408 5273#define HSW_NDE_RSTWRN_OPT 0x46408
5222#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) 5274#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
5223 5275
5276#define FF_SLICE_CS_CHICKEN2 0x02e4
5277#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
5278
5224/* GEN7 chicken */ 5279/* GEN7 chicken */
5225#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 5280#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
5226# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) 5281# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
5282# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
5227#define COMMON_SLICE_CHICKEN2 0x7014 5283#define COMMON_SLICE_CHICKEN2 0x7014
5228# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) 5284# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
5229 5285
5230#define HIZ_CHICKEN 0x7018 5286#define HIZ_CHICKEN 0x7018
5231# define CHV_HZ_8X8_MODE_IN_1X (1<<15) 5287# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
5288# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
5289
5290#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308
5291#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
5232 5292
5233#define GEN7_L3SQCREG1 0xB010 5293#define GEN7_L3SQCREG1 0xB010
5234#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 5294#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -5245,11 +5305,16 @@ enum punit_power_well {
5245#define GEN7_L3SQCREG4 0xb034 5305#define GEN7_L3SQCREG4 0xb034
5246#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) 5306#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
5247 5307
5308#define GEN8_L3SQCREG4 0xb118
5309#define GEN8_LQSC_RO_PERF_DIS (1<<27)
5310
5248/* GEN8 chicken */ 5311/* GEN8 chicken */
5249#define HDC_CHICKEN0 0x7300 5312#define HDC_CHICKEN0 0x7300
5250#define HDC_FORCE_NON_COHERENT (1<<4)
5251#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
5252#define HDC_FENCE_DEST_SLM_DISABLE (1<<14) 5313#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
5314#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
5315#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
5316#define HDC_FORCE_NON_COHERENT (1<<4)
5317#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
5253 5318
5254/* WaCatErrorRejectionIssue */ 5319/* WaCatErrorRejectionIssue */
5255#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 5320#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5258,6 +5323,9 @@ enum punit_power_well {
5258#define HSW_SCRATCH1 0xb038 5323#define HSW_SCRATCH1 0xb038
5259#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) 5324#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
5260 5325
5326#define BDW_SCRATCH1 0xb11c
5327#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
5328
5261/* PCH */ 5329/* PCH */
5262 5330
5263/* south display engine interrupt: IBX */ 5331/* south display engine interrupt: IBX */
@@ -5980,6 +6048,7 @@ enum punit_power_well {
5980#define HSW_IDICR 0x9008 6048#define HSW_IDICR 0x9008
5981#define IDIHASHMSK(x) (((x) & 0x3f) << 16) 6049#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
5982#define HSW_EDRAM_PRESENT 0x120010 6050#define HSW_EDRAM_PRESENT 0x120010
6051#define EDRAM_ENABLED 0x1
5983 6052
5984#define GEN6_UCGCTL1 0x9400 6053#define GEN6_UCGCTL1 0x9400
5985# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) 6054# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
@@ -6003,6 +6072,7 @@ enum punit_power_well {
6003#define GEN6_RSTCTL 0x9420 6072#define GEN6_RSTCTL 0x9420
6004 6073
6005#define GEN8_UCGCTL6 0x9430 6074#define GEN8_UCGCTL6 0x9430
6075#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
6006#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) 6076#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
6007 6077
6008#define GEN6_GFXPAUSE 0xA000 6078#define GEN6_GFXPAUSE 0xA000
@@ -6155,6 +6225,26 @@ enum punit_power_well {
6155#define GEN6_RC6 3 6225#define GEN6_RC6 3
6156#define GEN6_RC7 4 6226#define GEN6_RC7 4
6157 6227
6228#define GEN9_SLICE0_PGCTL_ACK 0x804c
6229#define GEN9_SLICE1_PGCTL_ACK 0x8050
6230#define GEN9_SLICE2_PGCTL_ACK 0x8054
6231#define GEN9_PGCTL_SLICE_ACK (1 << 0)
6232
6233#define GEN9_SLICE0_SS01_EU_PGCTL_ACK 0x805c
6234#define GEN9_SLICE0_SS23_EU_PGCTL_ACK 0x8060
6235#define GEN9_SLICE1_SS01_EU_PGCTL_ACK 0x8064
6236#define GEN9_SLICE1_SS23_EU_PGCTL_ACK 0x8068
6237#define GEN9_SLICE2_SS01_EU_PGCTL_ACK 0x806c
6238#define GEN9_SLICE2_SS23_EU_PGCTL_ACK 0x8070
6239#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
6240#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
6241#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
6242#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
6243#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
6244#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
6245#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
6246#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
6247
6158#define GEN7_MISCCPCTL (0x9424) 6248#define GEN7_MISCCPCTL (0x9424)
6159#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) 6249#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
6160 6250
@@ -6185,6 +6275,7 @@ enum punit_power_well {
6185 6275
6186#define GEN9_HALF_SLICE_CHICKEN5 0xe188 6276#define GEN9_HALF_SLICE_CHICKEN5 0xe188
6187#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5) 6277#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
6278#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
6188 6279
6189#define GEN8_ROW_CHICKEN 0xe4f0 6280#define GEN8_ROW_CHICKEN 0xe4f0
6190#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) 6281#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
@@ -6200,8 +6291,12 @@ enum punit_power_well {
6200#define HALF_SLICE_CHICKEN3 0xe184 6291#define HALF_SLICE_CHICKEN3 0xe184
6201#define HSW_SAMPLE_C_PERFORMANCE (1<<9) 6292#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
6202#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) 6293#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
6294#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
6203#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) 6295#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
6204 6296
6297#define GEN9_HALF_SLICE_CHICKEN7 0xe194
6298#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
6299
6205/* Audio */ 6300/* Audio */
6206#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) 6301#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
6207#define INTEL_AUDIO_DEVCL 0x808629FB 6302#define INTEL_AUDIO_DEVCL 0x808629FB
@@ -6351,6 +6446,13 @@ enum punit_power_well {
6351#define HSW_PWR_WELL_FORCE_ON (1<<19) 6446#define HSW_PWR_WELL_FORCE_ON (1<<19)
6352#define HSW_PWR_WELL_CTL6 0x45414 6447#define HSW_PWR_WELL_CTL6 0x45414
6353 6448
6449/* SKL Fuse Status */
6450#define SKL_FUSE_STATUS 0x42000
6451#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
6452#define SKL_FUSE_PG0_DIST_STATUS (1<<27)
6453#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
6454#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
6455
6354/* Per-pipe DDI Function Control */ 6456/* Per-pipe DDI Function Control */
6355#define TRANS_DDI_FUNC_CTL_A 0x60400 6457#define TRANS_DDI_FUNC_CTL_A 0x60400
6356#define TRANS_DDI_FUNC_CTL_B 0x61400 6458#define TRANS_DDI_FUNC_CTL_B 0x61400
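The register additions above follow the file's usual SHIFT/MASK convention (e.g. GEN8_RPCS_S_CNT_SHIFT and GEN8_RPCS_S_CNT_MASK). A hedged sketch of how such pairs are consumed — mask the field out, then shift it down; the register value below is invented purely for illustration:

/* Extracting a bitfield with the SHIFT/MASK macro pattern used above. */
#include <stdio.h>
#include <stdint.h>

#define GEN8_RPCS_S_CNT_SHIFT	15
#define GEN8_RPCS_S_CNT_MASK	(0x7 << GEN8_RPCS_S_CNT_SHIFT)

static unsigned int rpcs_slice_count(uint32_t rpcs)
{
	return (rpcs & GEN8_RPCS_S_CNT_MASK) >> GEN8_RPCS_S_CNT_SHIFT;
}

int main(void)
{
	uint32_t rpcs = 3u << GEN8_RPCS_S_CNT_SHIFT;	/* pretend readout */

	printf("slice count: %u\n", rpcs_slice_count(rpcs));
	return 0;
}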
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 9f19ed38cdc3..cf67f82f7b7f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,166 +29,6 @@
29#include "intel_drv.h" 29#include "intel_drv.h"
30#include "i915_reg.h" 30#include "i915_reg.h"
31 31
32static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
33{
34 struct drm_i915_private *dev_priv = dev->dev_private;
35
36 I915_WRITE8(index_port, reg);
37 return I915_READ8(data_port);
38}
39
40static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
41{
42 struct drm_i915_private *dev_priv = dev->dev_private;
43
44 I915_READ8(st01);
45 I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
46 return I915_READ8(VGA_AR_DATA_READ);
47}
48
49static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
50{
51 struct drm_i915_private *dev_priv = dev->dev_private;
52
53 I915_READ8(st01);
54 I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
55 I915_WRITE8(VGA_AR_DATA_WRITE, val);
56}
57
58static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
59{
60 struct drm_i915_private *dev_priv = dev->dev_private;
61
62 I915_WRITE8(index_port, reg);
63 I915_WRITE8(data_port, val);
64}
65
66static void i915_save_vga(struct drm_device *dev)
67{
68 struct drm_i915_private *dev_priv = dev->dev_private;
69 int i;
70 u16 cr_index, cr_data, st01;
71
72 /* VGA state */
73 dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
74 dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
75 dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
76 dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
77
78 /* VGA color palette registers */
79 dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
80
81 /* MSR bits */
82 dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
83 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
84 cr_index = VGA_CR_INDEX_CGA;
85 cr_data = VGA_CR_DATA_CGA;
86 st01 = VGA_ST01_CGA;
87 } else {
88 cr_index = VGA_CR_INDEX_MDA;
89 cr_data = VGA_CR_DATA_MDA;
90 st01 = VGA_ST01_MDA;
91 }
92
93 /* CRT controller regs */
94 i915_write_indexed(dev, cr_index, cr_data, 0x11,
95 i915_read_indexed(dev, cr_index, cr_data, 0x11) &
96 (~0x80));
97 for (i = 0; i <= 0x24; i++)
98 dev_priv->regfile.saveCR[i] =
99 i915_read_indexed(dev, cr_index, cr_data, i);
100 /* Make sure we don't turn off CR group 0 writes */
101 dev_priv->regfile.saveCR[0x11] &= ~0x80;
102
103 /* Attribute controller registers */
104 I915_READ8(st01);
105 dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
106 for (i = 0; i <= 0x14; i++)
107 dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
108 I915_READ8(st01);
109 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
110 I915_READ8(st01);
111
112 /* Graphics controller registers */
113 for (i = 0; i < 9; i++)
114 dev_priv->regfile.saveGR[i] =
115 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
116
117 dev_priv->regfile.saveGR[0x10] =
118 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
119 dev_priv->regfile.saveGR[0x11] =
120 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
121 dev_priv->regfile.saveGR[0x18] =
122 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
123
124 /* Sequencer registers */
125 for (i = 0; i < 8; i++)
126 dev_priv->regfile.saveSR[i] =
127 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
128}
129
130static void i915_restore_vga(struct drm_device *dev)
131{
132 struct drm_i915_private *dev_priv = dev->dev_private;
133 int i;
134 u16 cr_index, cr_data, st01;
135
136 /* VGA state */
137 I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
138
139 I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
140 I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
141 I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
142 POSTING_READ(VGA_PD);
143 udelay(150);
144
145 /* MSR bits */
146 I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
147 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
148 cr_index = VGA_CR_INDEX_CGA;
149 cr_data = VGA_CR_DATA_CGA;
150 st01 = VGA_ST01_CGA;
151 } else {
152 cr_index = VGA_CR_INDEX_MDA;
153 cr_data = VGA_CR_DATA_MDA;
154 st01 = VGA_ST01_MDA;
155 }
156
157 /* Sequencer registers, don't write SR07 */
158 for (i = 0; i < 7; i++)
159 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
160 dev_priv->regfile.saveSR[i]);
161
162 /* CRT controller regs */
163 /* Enable CR group 0 writes */
164 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
165 for (i = 0; i <= 0x24; i++)
166 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
167
168 /* Graphics controller regs */
169 for (i = 0; i < 9; i++)
170 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
171 dev_priv->regfile.saveGR[i]);
172
173 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
174 dev_priv->regfile.saveGR[0x10]);
175 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
176 dev_priv->regfile.saveGR[0x11]);
177 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
178 dev_priv->regfile.saveGR[0x18]);
179
180 /* Attribute controller registers */
181 I915_READ8(st01); /* switch back to index mode */
182 for (i = 0; i <= 0x14; i++)
183 i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
184 I915_READ8(st01); /* switch back to index mode */
185 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
186 I915_READ8(st01);
187
188 /* VGA color palette registers */
189 I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
190}
191
192static void i915_save_display(struct drm_device *dev) 32static void i915_save_display(struct drm_device *dev)
193{ 33{
194 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -197,11 +37,6 @@ static void i915_save_display(struct drm_device *dev)
197 if (INTEL_INFO(dev)->gen <= 4) 37 if (INTEL_INFO(dev)->gen <= 4)
198 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); 38 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
199 39
200 /* This is only meaningful in non-KMS mode */
201 /* Don't regfile.save them in KMS mode */
202 if (!drm_core_check_feature(dev, DRIVER_MODESET))
203 i915_save_display_reg(dev);
204
205 /* LVDS state */ 40 /* LVDS state */
206 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 41 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
207 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 42 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
@@ -224,9 +59,6 @@ static void i915_save_display(struct drm_device *dev)
224 /* save FBC interval */ 59 /* save FBC interval */
225 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) 60 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
226 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); 61 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
227
228 if (!drm_core_check_feature(dev, DRIVER_MODESET))
229 i915_save_vga(dev);
230} 62}
231 63
232static void i915_restore_display(struct drm_device *dev) 64static void i915_restore_display(struct drm_device *dev)
@@ -238,11 +70,7 @@ static void i915_restore_display(struct drm_device *dev)
238 if (INTEL_INFO(dev)->gen <= 4) 70 if (INTEL_INFO(dev)->gen <= 4)
239 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); 71 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
240 72
241 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 73 mask = ~LVDS_PORT_EN;
242 i915_restore_display_reg(dev);
243
244 if (drm_core_check_feature(dev, DRIVER_MODESET))
245 mask = ~LVDS_PORT_EN;
246 74
247 /* LVDS state */ 75 /* LVDS state */
248 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 76 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -270,10 +98,7 @@ static void i915_restore_display(struct drm_device *dev)
270 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) 98 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
271 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); 99 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
272 100
273 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 101 i915_redisable_vga(dev);
274 i915_restore_vga(dev);
275 else
276 i915_redisable_vga(dev);
277} 102}
278 103
279int i915_save_state(struct drm_device *dev) 104int i915_save_state(struct drm_device *dev)
@@ -285,24 +110,6 @@ int i915_save_state(struct drm_device *dev)
285 110
286 i915_save_display(dev); 111 i915_save_display(dev);
287 112
288 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
289 /* Interrupt state */
290 if (HAS_PCH_SPLIT(dev)) {
291 dev_priv->regfile.saveDEIER = I915_READ(DEIER);
292 dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
293 dev_priv->regfile.saveGTIER = I915_READ(GTIER);
294 dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
295 dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
296 dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
297 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
298 I915_READ(RSTDBYCTL);
299 dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
300 } else {
301 dev_priv->regfile.saveIER = I915_READ(IER);
302 dev_priv->regfile.saveIMR = I915_READ(IMR);
303 }
304 }
305
306 if (IS_GEN4(dev)) 113 if (IS_GEN4(dev))
307 pci_read_config_word(dev->pdev, GCDGMBUS, 114 pci_read_config_word(dev->pdev, GCDGMBUS,
308 &dev_priv->regfile.saveGCDGMBUS); 115 &dev_priv->regfile.saveGCDGMBUS);
@@ -341,24 +148,6 @@ int i915_restore_state(struct drm_device *dev)
341 dev_priv->regfile.saveGCDGMBUS); 148 dev_priv->regfile.saveGCDGMBUS);
342 i915_restore_display(dev); 149 i915_restore_display(dev);
343 150
344 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
345 /* Interrupt state */
346 if (HAS_PCH_SPLIT(dev)) {
347 I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
348 I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
349 I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
350 I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
351 I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
352 I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
353 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
354 I915_WRITE(RSTDBYCTL,
355 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
356 } else {
357 I915_WRITE(IER, dev_priv->regfile.saveIER);
358 I915_WRITE(IMR, dev_priv->regfile.saveIMR);
359 }
360 }
361
362 /* Cache mode state */ 151 /* Cache mode state */
363 if (INTEL_INFO(dev)->gen < 7) 152 if (INTEL_INFO(dev)->gen < 7)
364 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 153 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
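The deleted i915_read_indexed()/i915_write_indexed() helpers above used the classic indexed VGA access pattern: one port selects the register, a second port carries the data. A hedged model with real port I/O replaced by an array so the sketch runs anywhere — none of this is the driver code:

/* Model of an index/data register pair, as used by the deleted helpers. */
#include <stdio.h>
#include <stdint.h>

static uint8_t vga_regs[256];	/* backing store for the "data port" */
static uint8_t vga_index;	/* last value written to the "index port" */

static void write_indexed(uint8_t reg, uint8_t val)
{
	vga_index = reg;		/* I915_WRITE8(index_port, reg) */
	vga_regs[vga_index] = val;	/* I915_WRITE8(data_port, val) */
}

static uint8_t read_indexed(uint8_t reg)
{
	vga_index = reg;
	return vga_regs[vga_index];
}

int main(void)
{
	/* mirror the CR11 dance above: clear bit 7 so CR0-7 stay writable */
	write_indexed(0x11, 0x85);
	write_indexed(0x11, read_indexed(0x11) & ~0x80);
	printf("CR11 = 0x%02x\n", read_indexed(0x11));
	return 0;
}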
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 49f5ade0edb7..67bd07edcbb0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -127,10 +127,19 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
127 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); 127 return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
128} 128}
129 129
130static ssize_t
131show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
132{
133 struct drm_minor *dminor = dev_get_drvdata(kdev);
134 u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
135 return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
136}
137
130static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL); 138static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
131static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL); 139static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
132static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL); 140static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
133static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL); 141static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
142static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
134 143
135static struct attribute *rc6_attrs[] = { 144static struct attribute *rc6_attrs[] = {
136 &dev_attr_rc6_enable.attr, 145 &dev_attr_rc6_enable.attr,
@@ -153,6 +162,16 @@ static struct attribute_group rc6p_attr_group = {
153 .name = power_group_name, 162 .name = power_group_name,
154 .attrs = rc6p_attrs 163 .attrs = rc6p_attrs
155}; 164};
165
166static struct attribute *media_rc6_attrs[] = {
167 &dev_attr_media_rc6_residency_ms.attr,
168 NULL
169};
170
171static struct attribute_group media_rc6_attr_group = {
172 .name = power_group_name,
173 .attrs = media_rc6_attrs
174};
156#endif 175#endif
157 176
158static int l3_access_valid(struct drm_device *dev, loff_t offset) 177static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -402,10 +421,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
402 /* We still need *_set_rps to process the new max_delay and 421 /* We still need *_set_rps to process the new max_delay and
403 * update the interrupt limits and PMINTRMSK even though 422 * update the interrupt limits and PMINTRMSK even though
404 * frequency request may be unchanged. */ 423 * frequency request may be unchanged. */
405 if (IS_VALLEYVIEW(dev)) 424 intel_set_rps(dev, val);
406 valleyview_set_rps(dev, val);
407 else
408 gen6_set_rps(dev, val);
409 425
410 mutex_unlock(&dev_priv->rps.hw_lock); 426 mutex_unlock(&dev_priv->rps.hw_lock);
411 427
@@ -464,10 +480,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
464 /* We still need *_set_rps to process the new min_delay and 480 /* We still need *_set_rps to process the new min_delay and
465 * update the interrupt limits and PMINTRMSK even though 481 * update the interrupt limits and PMINTRMSK even though
466 * frequency request may be unchanged. */ 482 * frequency request may be unchanged. */
467 if (IS_VALLEYVIEW(dev)) 483 intel_set_rps(dev, val);
468 valleyview_set_rps(dev, val);
469 else
470 gen6_set_rps(dev, val);
471 484
472 mutex_unlock(&dev_priv->rps.hw_lock); 485 mutex_unlock(&dev_priv->rps.hw_lock);
473 486
@@ -493,38 +506,17 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
493 struct drm_minor *minor = dev_to_drm_minor(kdev); 506 struct drm_minor *minor = dev_to_drm_minor(kdev);
494 struct drm_device *dev = minor->dev; 507 struct drm_device *dev = minor->dev;
495 struct drm_i915_private *dev_priv = dev->dev_private; 508 struct drm_i915_private *dev_priv = dev->dev_private;
496 u32 val, rp_state_cap; 509 u32 val;
497 ssize_t ret;
498
499 ret = mutex_lock_interruptible(&dev->struct_mutex);
500 if (ret)
501 return ret;
502 intel_runtime_pm_get(dev_priv);
503 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
504 intel_runtime_pm_put(dev_priv);
505 mutex_unlock(&dev->struct_mutex);
506 510
507 if (attr == &dev_attr_gt_RP0_freq_mhz) { 511 if (attr == &dev_attr_gt_RP0_freq_mhz)
508 if (IS_VALLEYVIEW(dev)) 512 val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
509 val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq); 513 else if (attr == &dev_attr_gt_RP1_freq_mhz)
510 else 514 val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
511 val = intel_gpu_freq(dev_priv, 515 else if (attr == &dev_attr_gt_RPn_freq_mhz)
512 ((rp_state_cap & 0x0000ff) >> 0)); 516 val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
513 } else if (attr == &dev_attr_gt_RP1_freq_mhz) { 517 else
514 if (IS_VALLEYVIEW(dev))
515 val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
516 else
517 val = intel_gpu_freq(dev_priv,
518 ((rp_state_cap & 0x00ff00) >> 8));
519 } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
520 if (IS_VALLEYVIEW(dev))
521 val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
522 else
523 val = intel_gpu_freq(dev_priv,
524 ((rp_state_cap & 0xff0000) >> 16));
525 } else {
526 BUG(); 518 BUG();
527 } 519
528 return snprintf(buf, PAGE_SIZE, "%d\n", val); 520 return snprintf(buf, PAGE_SIZE, "%d\n", val);
529} 521}
530 522
@@ -633,6 +625,12 @@ void i915_setup_sysfs(struct drm_device *dev)
633 if (ret) 625 if (ret)
634 DRM_ERROR("RC6p residency sysfs setup failed\n"); 626 DRM_ERROR("RC6p residency sysfs setup failed\n");
635 } 627 }
628 if (IS_VALLEYVIEW(dev)) {
629 ret = sysfs_merge_group(&dev->primary->kdev->kobj,
630 &media_rc6_attr_group);
631 if (ret)
632 DRM_ERROR("Media RC6 residency sysfs setup failed\n");
633 }
636#endif 634#endif
637 if (HAS_L3_DPF(dev)) { 635 if (HAS_L3_DPF(dev)) {
638 ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs); 636 ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
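Both the sysfs and interrupt hunks above replace open-coded "if (IS_VALLEYVIEW) valleyview_set_rps() else gen6_set_rps()" branches with a single intel_set_rps() call. A hedged sketch of that consolidation: callers take one path, and the platform dispatch lives in exactly one place. The flag and both setters below are stand-ins:

/* One wrapper hides the platform dispatch that callers used to repeat. */
#include <stdio.h>
#include <stdbool.h>

static void valleyview_set_rps(int val) { printf("vlv rps -> %d\n", val); }
static void gen6_set_rps(int val)       { printf("gen6 rps -> %d\n", val); }

static bool is_valleyview;	/* would come from the device info */

static void intel_set_rps(int val)
{
	if (is_valleyview)
		valleyview_set_rps(val);
	else
		gen6_set_rps(val);
}

int main(void)
{
	is_valleyview = true;
	intel_set_rps(350);	/* every caller now takes the same path */
	return 0;
}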
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 6058a01b4443..f004d3d89b87 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_vma_bind,
115 TP_STRUCT__entry( 115 TP_STRUCT__entry(
116 __field(struct drm_i915_gem_object *, obj) 116 __field(struct drm_i915_gem_object *, obj)
117 __field(struct i915_address_space *, vm) 117 __field(struct i915_address_space *, vm)
118 __field(u32, offset) 118 __field(u64, offset)
119 __field(u32, size) 119 __field(u32, size)
120 __field(unsigned, flags) 120 __field(unsigned, flags)
121 ), 121 ),
@@ -128,7 +128,7 @@ TRACE_EVENT(i915_vma_bind,
128 __entry->flags = flags; 128 __entry->flags = flags;
129 ), 129 ),
130 130
131 TP_printk("obj=%p, offset=%08x size=%x%s vm=%p", 131 TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
132 __entry->obj, __entry->offset, __entry->size, 132 __entry->obj, __entry->offset, __entry->size,
133 __entry->flags & PIN_MAPPABLE ? ", mappable" : "", 133 __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
134 __entry->vm) 134 __entry->vm)
@@ -141,7 +141,7 @@ TRACE_EVENT(i915_vma_unbind,
141 TP_STRUCT__entry( 141 TP_STRUCT__entry(
142 __field(struct drm_i915_gem_object *, obj) 142 __field(struct drm_i915_gem_object *, obj)
143 __field(struct i915_address_space *, vm) 143 __field(struct i915_address_space *, vm)
144 __field(u32, offset) 144 __field(u64, offset)
145 __field(u32, size) 145 __field(u32, size)
146 ), 146 ),
147 147
@@ -152,7 +152,7 @@ TRACE_EVENT(i915_vma_unbind,
152 __entry->size = vma->node.size; 152 __entry->size = vma->node.size;
153 ), 153 ),
154 154
155 TP_printk("obj=%p, offset=%08x size=%x vm=%p", 155 TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
156 __entry->obj, __entry->offset, __entry->size, __entry->vm) 156 __entry->obj, __entry->offset, __entry->size, __entry->vm)
157); 157);
158 158
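The tracepoint hunks above widen the offset field from u32 to u64 and switch the format from %08x to %016llx. A hedged sketch of the bug class that addresses: a GTT offset above 4GiB stored in a u32 is silently truncated, and the narrow format hides the loss. Values are made up for illustration:

/* Demonstrates the truncation fixed by the u32 -> u64 field change. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 0x1234567890ull;	/* > 32 bits */
	uint32_t narrow = (uint32_t)offset;	/* old __field(u32, offset) */

	printf("truncated: offset=%08x\n", (unsigned)narrow);
	printf("widened:   offset=%016llx\n", (unsigned long long)offset);
	return 0;
}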
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
deleted file mode 100644
index d10fe3e9c49f..000000000000
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ /dev/null
@@ -1,552 +0,0 @@
1/*
2 *
3 * Copyright 2008 (c) Intel Corporation
4 * Jesse Barnes <jbarnes@virtuousgeek.org>
5 * Copyright 2013 (c) Intel Corporation
6 * Daniel Vetter <daniel.vetter@ffwll.ch>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#include <drm/drmP.h>
30#include <drm/i915_drm.h>
31#include "intel_drv.h"
32#include "i915_reg.h"
33
34static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
35{
36 struct drm_i915_private *dev_priv = dev->dev_private;
37 u32 dpll_reg;
38
39 /* On IVB, 3rd pipe shares PLL with another one */
40 if (pipe > 1)
41 return false;
42
43 if (HAS_PCH_SPLIT(dev))
44 dpll_reg = PCH_DPLL(pipe);
45 else
46 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
47
48 return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
49}
50
51static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
52{
53 struct drm_i915_private *dev_priv = dev->dev_private;
54 unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
55 u32 *array;
56 int i;
57
58 if (!i915_pipe_enabled(dev, pipe))
59 return;
60
61 if (HAS_PCH_SPLIT(dev))
62 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
63
64 if (pipe == PIPE_A)
65 array = dev_priv->regfile.save_palette_a;
66 else
67 array = dev_priv->regfile.save_palette_b;
68
69 for (i = 0; i < 256; i++)
70 array[i] = I915_READ(reg + (i << 2));
71}
72
73static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
74{
75 struct drm_i915_private *dev_priv = dev->dev_private;
76 unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
77 u32 *array;
78 int i;
79
80 if (!i915_pipe_enabled(dev, pipe))
81 return;
82
83 if (HAS_PCH_SPLIT(dev))
84 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
85
86 if (pipe == PIPE_A)
87 array = dev_priv->regfile.save_palette_a;
88 else
89 array = dev_priv->regfile.save_palette_b;
90
91 for (i = 0; i < 256; i++)
92 I915_WRITE(reg + (i << 2), array[i]);
93}
94
95void i915_save_display_reg(struct drm_device *dev)
96{
97 struct drm_i915_private *dev_priv = dev->dev_private;
98 int i;
99
100 /* Cursor state */
101 dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
102 dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
103 dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
104 dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
105 dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
106 dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
107 if (IS_GEN2(dev))
108 dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
109
110 if (HAS_PCH_SPLIT(dev)) {
111 dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
112 dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
113 }
114
115 /* Pipe & plane A info */
116 dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
117 dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
118 if (HAS_PCH_SPLIT(dev)) {
119 dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
120 dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
121 dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
122 } else {
123 dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
124 dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
125 dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
126 }
127 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
128 dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
129 dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
130 dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
131 dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
132 dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
133 dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
134 dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
135 if (!HAS_PCH_SPLIT(dev))
136 dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
137
138 if (HAS_PCH_SPLIT(dev)) {
139 dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
140 dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
141 dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
142 dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
143
144 dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
145 dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
146
147 dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
148 dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
149 dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
150
151 dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
152 dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
153 dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
154 dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
155 dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
156 dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
157 dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
158 }
159
160 dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
161 dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
162 dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
163 dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
164 dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
165 if (INTEL_INFO(dev)->gen >= 4) {
166 dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
167 dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
168 }
169 i915_save_palette(dev, PIPE_A);
170 dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
171
172 /* Pipe & plane B info */
173 dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
174 dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
175 if (HAS_PCH_SPLIT(dev)) {
176 dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
177 dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
178 dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
179 } else {
180 dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
181 dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
182 dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
183 }
184 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
185 dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
186 dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
187 dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
188 dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
189 dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
190 dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
191 dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
192 if (!HAS_PCH_SPLIT(dev))
193 dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
194
195 if (HAS_PCH_SPLIT(dev)) {
196 dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
197 dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
198 dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
199 dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
200
201 dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
202 dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
203
204 dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
205 dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
206 dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
207
208 dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
209 dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
210 dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
211 dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
212 dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
213 dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
214 dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
215 }
216
217 dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
218 dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
219 dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
220 dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
221 dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
222 if (INTEL_INFO(dev)->gen >= 4) {
223 dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
224 dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
225 }
226 i915_save_palette(dev, PIPE_B);
227 dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
228
229 /* Fences */
230 switch (INTEL_INFO(dev)->gen) {
231 case 7:
232 case 6:
233 for (i = 0; i < 16; i++)
234 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
235 break;
236 case 5:
237 case 4:
238 for (i = 0; i < 16; i++)
239 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
240 break;
241 case 3:
242 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
243 for (i = 0; i < 8; i++)
244 dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
245 case 2:
246 for (i = 0; i < 8; i++)
247 dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
248 break;
249 }
250
251 /* CRT state */
252 if (HAS_PCH_SPLIT(dev))
253 dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
254 else
255 dev_priv->regfile.saveADPA = I915_READ(ADPA);
256
257 /* Display Port state */
258 if (SUPPORTS_INTEGRATED_DP(dev)) {
259 dev_priv->regfile.saveDP_B = I915_READ(DP_B);
260 dev_priv->regfile.saveDP_C = I915_READ(DP_C);
261 dev_priv->regfile.saveDP_D = I915_READ(DP_D);
262 dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
263 dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
264 dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
265 dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
266 dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
267 dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
268 dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
269 dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
270 }
271 /* FIXME: regfile.save TV & SDVO state */
272
273 /* Panel fitter */
274 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
275 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
276 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
277 }
278
279 /* Backlight */
280 if (INTEL_INFO(dev)->gen <= 4)
281 pci_read_config_byte(dev->pdev, PCI_LBPC,
282 &dev_priv->regfile.saveLBB);
283
284 if (HAS_PCH_SPLIT(dev)) {
285 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
286 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
287 dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
288 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
289 } else {
290 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
291 if (INTEL_INFO(dev)->gen >= 4)
292 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
293 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
294 }
295
296 return;
297}
298
299void i915_restore_display_reg(struct drm_device *dev)
300{
301 struct drm_i915_private *dev_priv = dev->dev_private;
302 int dpll_a_reg, fpa0_reg, fpa1_reg;
303 int dpll_b_reg, fpb0_reg, fpb1_reg;
304 int i;
305
306 /* Backlight */
307 if (INTEL_INFO(dev)->gen <= 4)
308 pci_write_config_byte(dev->pdev, PCI_LBPC,
309 dev_priv->regfile.saveLBB);
310
311 if (HAS_PCH_SPLIT(dev)) {
312 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
313 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
314 /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
315 * otherwise we get blank eDP screen after S3 on some machines
316 */
317 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
318 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
319 } else {
320 if (INTEL_INFO(dev)->gen >= 4)
321 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
322 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
323 I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
324 }
325
326 /* Panel fitter */
327 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
328 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
329 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
330 }
331
332 /* Display port ratios (must be done before clock is set) */
333 if (SUPPORTS_INTEGRATED_DP(dev)) {
334 I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
335 I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
336 I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
337 I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
338 I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
339 I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
340 I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
341 I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
342 }
343
344 /* Fences */
345 switch (INTEL_INFO(dev)->gen) {
346 case 7:
347 case 6:
348 for (i = 0; i < 16; i++)
349 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
350 break;
351 case 5:
352 case 4:
353 for (i = 0; i < 16; i++)
354 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
355 break;
356 case 3:
357 case 2:
358 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
359 for (i = 0; i < 8; i++)
360 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
361 for (i = 0; i < 8; i++)
362 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
363 break;
364 }
365
366
367 if (HAS_PCH_SPLIT(dev)) {
368 dpll_a_reg = _PCH_DPLL_A;
369 dpll_b_reg = _PCH_DPLL_B;
370 fpa0_reg = _PCH_FPA0;
371 fpb0_reg = _PCH_FPB0;
372 fpa1_reg = _PCH_FPA1;
373 fpb1_reg = _PCH_FPB1;
374 } else {
375 dpll_a_reg = _DPLL_A;
376 dpll_b_reg = _DPLL_B;
377 fpa0_reg = _FPA0;
378 fpb0_reg = _FPB0;
379 fpa1_reg = _FPA1;
380 fpb1_reg = _FPB1;
381 }
382
383 if (HAS_PCH_SPLIT(dev)) {
384 I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
385 I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
386 }
387
388 /* Pipe & plane A info */
389 /* Prime the clock */
390 if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
391 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
392 ~DPLL_VCO_ENABLE);
393 POSTING_READ(dpll_a_reg);
394 udelay(150);
395 }
396 I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
397 I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
398 /* Actually enable it */
399 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
400 POSTING_READ(dpll_a_reg);
401 udelay(150);
402 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
403 I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
404 POSTING_READ(_DPLL_A_MD);
405 }
406 udelay(150);
407
408 /* Restore mode */
409 I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
410 I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
411 I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
412 I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
413 I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
414 I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
415 if (!HAS_PCH_SPLIT(dev))
416 I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
417
418 if (HAS_PCH_SPLIT(dev)) {
419 I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
420 I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
421 I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
422 I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
423
424 I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
425 I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
426
427 I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
428 I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
429 I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
430
431 I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
432 I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
433 I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
434 I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
435 I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
436 I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
437 I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
438 }
439
440 /* Restore plane info */
441 I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
442 I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
443 I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
444 I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
445 I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
446 if (INTEL_INFO(dev)->gen >= 4) {
447 I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
448 I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
449 }
450
451 I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
452
453 i915_restore_palette(dev, PIPE_A);
454	/* Enable the plane; rewriting DSPAADDR latches the update */
455 I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
456 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
457
458 /* Pipe & plane B info */
459 if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
460 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
461 ~DPLL_VCO_ENABLE);
462 POSTING_READ(dpll_b_reg);
463 udelay(150);
464 }
465 I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
466 I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
467 /* Actually enable it */
468 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
469 POSTING_READ(dpll_b_reg);
470 udelay(150);
471 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
472 I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
473 POSTING_READ(_DPLL_B_MD);
474 }
475 udelay(150);
476
477 /* Restore mode */
478 I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
479 I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
480 I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
481 I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
482 I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
483 I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
484 if (!HAS_PCH_SPLIT(dev))
485 I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
486
487 if (HAS_PCH_SPLIT(dev)) {
488 I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
489 I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
490 I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
491 I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
492
493 I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
494 I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
495
496 I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
497 I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
498 I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
499
500 I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
501 I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
502 I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
503 I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
504 I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
505 I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
506 I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
507 }
508
509 /* Restore plane info */
510 I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
511 I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
512 I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
513 I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
514 I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
515 if (INTEL_INFO(dev)->gen >= 4) {
516 I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
517 I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
518 }
519
520 I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
521
522 i915_restore_palette(dev, PIPE_B);
523	/* Enable the plane; rewriting DSPBADDR latches the update */
524 I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
525 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
526
527 /* Cursor state */
528 I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
529 I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
530 I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
531 I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
532 I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
533 I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
534 if (IS_GEN2(dev))
535 I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
536
537 /* CRT state */
538 if (HAS_PCH_SPLIT(dev))
539 I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
540 else
541 I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
542
543 /* Display Port state */
544 if (SUPPORTS_INTEGRATED_DP(dev)) {
545 I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
546 I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
547 I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
548 }
549 /* FIXME: restore TV & SDVO state */
550
551 return;
552}
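The restore path above uses a consistent prime-then-enable pattern for the
DPLLs: program the dividers with the VCO bit cleared, post the write, wait
~150 us for the PLL to settle, then write the fully saved value. A minimal
sketch of that pattern, with write_reg()/read_reg() as hypothetical
stand-ins for I915_WRITE/POSTING_READ:

	/* Sketch only: restore one DPLL from a value saved at suspend. */
	static void restore_dpll(u32 reg, u32 saved)
	{
		if (saved & DPLL_VCO_ENABLE) {
			/* Program dividers with the VCO still disabled. */
			write_reg(reg, saved & ~DPLL_VCO_ENABLE);
			(void)read_reg(reg);	/* post the write */
			udelay(150);		/* let the PLL settle */
		}
		/* Now enable it with the complete saved value. */
		write_reg(reg, saved);
		(void)read_reg(reg);
		udelay(150);
	}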
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
new file mode 100644
index 000000000000..5eee75bff170
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -0,0 +1,264 @@
1/*
2 * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "intel_drv.h"
25#include "i915_vgpu.h"
26
27/**
28 * DOC: Intel GVT-g guest support
29 *
30 * Intel GVT-g is a graphics virtualization technology that shares the
31 * GPU among multiple virtual machines on a time-sharing basis. Each
32 * virtual machine is presented with a virtual GPU (vGPU) that has the
33 * same features as the underlying physical GPU (pGPU), so the i915
34 * driver can run seamlessly in a virtual machine. This file provides
35 * vGPU-specific optimizations when running in a virtual machine, to
36 * reduce the complexity of vGPU emulation and to improve performance.
37 *
38 * The primary facility introduced here is the so-called "address space
39 * ballooning" technique. Intel GVT-g partitions global graphics memory
40 * among multiple VMs, so each VM can directly access its portion of the
41 * memory without the hypervisor's intervention, e.g. to fill textures or
42 * queue commands. However, with this partitioning an unmodified i915
43 * driver would assume a smaller graphics memory starting at address
44 * ZERO, requiring the vGPU emulation module to translate every graphics
45 * address between the 'guest view' and the 'host view' for all registers
46 * and command opcodes that carry a graphics memory address. To avoid
47 * this, Intel GVT-g tells each guest i915 driver the exact partitioning
48 * layout; the guest then reserves the portions it does not own and
49 * excludes them from allocation, so the vGPU emulation module only
50 * needs to scan and validate graphics addresses, not translate them.
51 *
52 */
53
54/**
55 * i915_check_vgpu - detect virtual GPU
56 * @dev: drm device
57 *
58 * This function is called at the initialization stage to detect whether
59 * the driver is running on a vGPU.
60 */
61void i915_check_vgpu(struct drm_device *dev)
62{
63 struct drm_i915_private *dev_priv = to_i915(dev);
64 uint64_t magic;
65 uint32_t version;
66
67 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
68
69 if (!IS_HASWELL(dev))
70 return;
71
72 magic = readq(dev_priv->regs + vgtif_reg(magic));
73 if (magic != VGT_MAGIC)
74 return;
75
76 version = INTEL_VGT_IF_VERSION_ENCODE(
77 readw(dev_priv->regs + vgtif_reg(version_major)),
78 readw(dev_priv->regs + vgtif_reg(version_minor)));
79 if (version != INTEL_VGT_IF_VERSION) {
80 DRM_INFO("VGT interface version mismatch!\n");
81 return;
82 }
83
84 dev_priv->vgpu.active = true;
85 DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
86}
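Once the flag is set, vGPU-only paths elsewhere in the driver can key off
it. A minimal sketch, assuming a trivial check of dev_priv->vgpu.active
(the helper name here is hypothetical, not part of this patch):

	/* Sketch only: balloon the address space when running on a vGPU. */
	static int i915_maybe_balloon(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);

		if (!dev_priv->vgpu.active)	/* set by i915_check_vgpu() */
			return 0;

		return intel_vgt_balloon(dev);
	}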
87
88struct _balloon_info_ {
89 /*
90	 * Up to 2 regions in each of the mappable and unmappable graphics
91	 * memory ranges may be ballooned. Index 0/1 covers mappable graphics
92	 * memory; index 2/3 covers unmappable graphics memory.
93 */
94 struct drm_mm_node space[4];
95};
96
97static struct _balloon_info_ bl_info;
98
99/**
100 * intel_vgt_deballoon - deballoon reserved graphics address chunks
101 *
102 * This function deallocates the ballooned-out graphics memory when the
103 * driver is unloaded or when ballooning fails.
104 */
105void intel_vgt_deballoon(void)
106{
107 int i;
108
109 DRM_DEBUG("VGT deballoon.\n");
110
111 for (i = 0; i < 4; i++) {
112 if (bl_info.space[i].allocated)
113 drm_mm_remove_node(&bl_info.space[i]);
114 }
115
116 memset(&bl_info, 0, sizeof(bl_info));
117}
118
119static int vgt_balloon_space(struct drm_mm *mm,
120 struct drm_mm_node *node,
121 unsigned long start, unsigned long end)
122{
123 unsigned long size = end - start;
124
125 if (start == end)
126 return -EINVAL;
127
128 DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
129 start, end, size / 1024);
130
131 node->start = start;
132 node->size = size;
133
134 return drm_mm_reserve_node(mm, node);
135}
136
137/**
138 * intel_vgt_balloon - balloon out reserved graphics address chunks
139 * @dev: drm device
140 *
141 * This function is called at the initialization stage to balloon out the
142 * graphics address space allocated to other vGPUs, by marking those ranges
143 * as reserved. The ballooning-related knowledge (starting address and size
144 * of the mappable/unmappable graphics memory) is described by the vgt_if
145 * structure in a reserved mmio range.
146 *
147 * As an example, the drawing below depicts one typical scenario after
148 * ballooning. Here vGPU1 has two pieces of graphics address space ballooned
149 * out, one in the mappable and one in the unmappable part. From vGPU1's
150 * point of view, the total size is the same as the physical one, with the
151 * start address of its graphics space being zero. Yet some portions are
152 * ballooned out (the hatched parts, which the drm allocator marks as
153 * reserved). From the host point of view, the graphics address space is
154 * partitioned among multiple vGPUs in different VMs.
155 *
156 * vGPU1 view Host view
157 * 0 ------> +-----------+ +-----------+
158 * ^ |///////////| | vGPU3 |
159 * | |///////////| +-----------+
160 * | |///////////| | vGPU2 |
161 * | +-----------+ +-----------+
162 * mappable GM | available | ==> | vGPU1 |
163 * | +-----------+ +-----------+
164 * | |///////////| | |
165 * v |///////////| | Host |
166 * +=======+===========+ +===========+
167 * ^ |///////////| | vGPU3 |
168 * | |///////////| +-----------+
169 * | |///////////| | vGPU2 |
170 * | +-----------+ +-----------+
171 * unmappable GM | available | ==> | vGPU1 |
172 * | +-----------+ +-----------+
173 * | |///////////| | |
174 * | |///////////| | Host |
175 * v |///////////| | |
176 * total GM size ------> +-----------+ +-----------+
177 *
178 * Returns:
180 * zero on success, non-zero if the configuration is invalid or ballooning failed
180 */
181int intel_vgt_balloon(struct drm_device *dev)
182{
183 struct drm_i915_private *dev_priv = to_i915(dev);
184 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
185 unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
186
187 unsigned long mappable_base, mappable_size, mappable_end;
188 unsigned long unmappable_base, unmappable_size, unmappable_end;
189 int ret;
190
191 mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
192 mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
193 unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
194 unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
195
196 mappable_end = mappable_base + mappable_size;
197 unmappable_end = unmappable_base + unmappable_size;
198
199 DRM_INFO("VGT ballooning configuration:\n");
200	DRM_INFO("Mappable graphics memory: base 0x%lx size %ld KiB\n",
201 mappable_base, mappable_size / 1024);
202	DRM_INFO("Unmappable graphics memory: base 0x%lx size %ld KiB\n",
203 unmappable_base, unmappable_size / 1024);
204
205 if (mappable_base < ggtt_vm->start ||
206 mappable_end > dev_priv->gtt.mappable_end ||
207 unmappable_base < dev_priv->gtt.mappable_end ||
208 unmappable_end > ggtt_vm_end) {
209 DRM_ERROR("Invalid ballooning configuration!\n");
210 return -EINVAL;
211 }
212
213 /* Unmappable graphic memory ballooning */
214 if (unmappable_base > dev_priv->gtt.mappable_end) {
215 ret = vgt_balloon_space(&ggtt_vm->mm,
216 &bl_info.space[2],
217 dev_priv->gtt.mappable_end,
218 unmappable_base);
219
220 if (ret)
221 goto err;
222 }
223
224 /*
225 * No need to partition out the last physical page,
226	 * because it is reserved for the guard page.
227 */
228 if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
229 ret = vgt_balloon_space(&ggtt_vm->mm,
230 &bl_info.space[3],
231 unmappable_end,
232 ggtt_vm_end - PAGE_SIZE);
233 if (ret)
234 goto err;
235 }
236
237 /* Mappable graphic memory ballooning */
238 if (mappable_base > ggtt_vm->start) {
239 ret = vgt_balloon_space(&ggtt_vm->mm,
240 &bl_info.space[0],
241 ggtt_vm->start, mappable_base);
242
243 if (ret)
244 goto err;
245 }
246
247 if (mappable_end < dev_priv->gtt.mappable_end) {
248 ret = vgt_balloon_space(&ggtt_vm->mm,
249 &bl_info.space[1],
250 mappable_end,
251 dev_priv->gtt.mappable_end);
252
253 if (ret)
254 goto err;
255 }
256
257	DRM_INFO("VGT ballooning succeeded\n");
258 return 0;
259
260err:
261	DRM_ERROR("VGT ballooning failed\n");
262 intel_vgt_deballoon();
263 return ret;
264}
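A worked example of the four balloon regions (all numbers illustrative
only): with a 4 GiB GGTT starting at 0, mappable_end at 512 MiB, and the
PVINFO page reporting vGPU1's mappable range as [256 MiB, 384 MiB) and its
unmappable range as [1 GiB, 2 GiB), intel_vgt_balloon() reserves:

	space[0]: [0,       256 MiB)           /* mappable, below our range */
	space[1]: [384 MiB, 512 MiB)           /* mappable, above our range */
	space[2]: [512 MiB, 1 GiB)             /* unmappable, below our range */
	space[3]: [2 GiB,   4 GiB - PAGE_SIZE) /* unmappable, above our range */

The topmost page is skipped because it is reserved for the guard page.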
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
new file mode 100644
index 000000000000..0db9ccf32605
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _I915_VGPU_H_
25#define _I915_VGPU_H_
26
27/* The MMIO offset of the shared info between guest and host emulator */
28#define VGT_PVINFO_PAGE 0x78000
29#define VGT_PVINFO_SIZE 0x1000
30
31/*
32 * The following structure is defined in GEN MMIO space
33 * for virtualization. (One page for now.)
34 */
35#define VGT_MAGIC 0x4776544776544776 /* 'vGTvGTvG' */
36#define VGT_VERSION_MAJOR 1
37#define VGT_VERSION_MINOR 0
38
39#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
40#define INTEL_VGT_IF_VERSION \
41 INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
42
43struct vgt_if {
44 uint64_t magic; /* VGT_MAGIC */
45 uint16_t version_major;
46 uint16_t version_minor;
47 uint32_t vgt_id; /* ID of vGT instance */
48 uint32_t rsv1[12]; /* pad to offset 0x40 */
49 /*
50	 * Data structure describing the ballooning info of resources.
51	 * Each VM can only have one contiguous region for now.
52	 * (Scattered resources may be supported in the future.)
53	 * (Starts at offset 0x40.)
54 */
55 struct {
56		/* Aperture register ballooning */
57 struct {
58 uint32_t base;
59 uint32_t size;
60 } mappable_gmadr; /* aperture */
61		/* GMADR register ballooning */
62 struct {
63 uint32_t base;
64 uint32_t size;
65 } nonmappable_gmadr; /* non aperture */
66 /* allowed fence registers */
67 uint32_t fence_num;
68 uint32_t rsv2[3];
69 } avail_rs; /* available/assigned resource */
70 uint32_t rsv3[0x200 - 24]; /* pad to half page */
71 /*
72	 * The bottom half page is for responses from the gfx driver to the
73	 * hypervisor. These fields are reserved for now.
74 */
75 uint32_t rsv4;
76 uint32_t display_ready; /* ready for display owner switch */
77 uint32_t rsv5[0x200 - 2]; /* pad to one page */
78} __packed;
79
80#define vgtif_reg(x) \
81 (VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)
82
83/* vGPU display status to be used by the host side */
84#define VGT_DRV_DISPLAY_NOT_READY 0
85#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
86
87extern void i915_check_vgpu(struct drm_device *dev);
88extern int intel_vgt_balloon(struct drm_device *dev);
89extern void intel_vgt_deballoon(void);
90
91#endif /* _I915_VGPU_H_ */
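The vgtif_reg() macro is the classic offsetof idiom: it maps a struct
vgt_if member to its MMIO offset inside the PVINFO page. A minimal sketch
of an equivalent form, assuming the standard offsetof macro (the name
vgtif_reg_alt is hypothetical):

	#include <linux/stddef.h>	/* offsetof */

	#define vgtif_reg_alt(x) \
		(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))

	/*
	 * Example: magic sits at offset 0 and version_major at offset 8,
	 * so vgtif_reg(version_major) evaluates to 0x78000 + 8 = 0x78008.
	 */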
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 19a9dd5408f3..011b8960fd75 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -134,9 +134,9 @@ int intel_atomic_commit(struct drm_device *dev,
134 * FIXME: The proper sequence here will eventually be: 134 * FIXME: The proper sequence here will eventually be:
135 * 135 *
136 * drm_atomic_helper_swap_state(dev, state) 136 * drm_atomic_helper_swap_state(dev, state)
137 * drm_atomic_helper_commit_pre_planes(dev, state); 137 * drm_atomic_helper_commit_modeset_disables(dev, state);
138 * drm_atomic_helper_commit_planes(dev, state); 138 * drm_atomic_helper_commit_planes(dev, state);
139 * drm_atomic_helper_commit_post_planes(dev, state); 139 * drm_atomic_helper_commit_modeset_enables(dev, state);
140 * drm_atomic_helper_wait_for_vblanks(dev, state); 140 * drm_atomic_helper_wait_for_vblanks(dev, state);
141 * drm_atomic_helper_cleanup_planes(dev, state); 141 * drm_atomic_helper_cleanup_planes(dev, state);
142 * drm_atomic_state_free(state); 142 * drm_atomic_state_free(state);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 3f178258d9f9..c684085cb56a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -662,6 +662,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
662 edp_link_params->vswing); 662 edp_link_params->vswing);
663 break; 663 break;
664 } 664 }
665
666 if (bdb->version >= 173) {
667 uint8_t vswing;
668
669 vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
670 dev_priv->vbt.edp_low_vswing = vswing == 0;
671 }
665} 672}
666 673
667static void 674static void
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index a6a8710f665f..6afd5be33367 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -554,6 +554,7 @@ struct bdb_edp {
554 /* ith bit indicates enabled/disabled for (i+1)th panel */ 554 /* ith bit indicates enabled/disabled for (i+1)th panel */
555 u16 edp_s3d_feature; 555 u16 edp_s3d_feature;
556 u16 edp_t3_optimization; 556 u16 edp_t3_optimization;
557 u64 edp_vswing_preemph; /* v173 */
557} __packed; 558} __packed;
558 559
559struct psr_table { 560struct psr_table {
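The new edp_vswing_preemph field packs one 4-bit code per panel into a
u64 (16 panels x 4 bits); parse_edp() above shifts by panel_type * 4 and
masks the low nibble, and a value of 0 selects the low-vswing eDP table.
A minimal sketch of the extraction (the helper name is hypothetical):

	/* Sketch only: pull the 4-bit vswing code for one panel. */
	static u8 edp_vswing_code(u64 edp_vswing_preemph, int panel_type)
	{
		/* 16 panels x 4 bits each; panel_type selects the nibble. */
		return (edp_vswing_preemph >> (panel_type * 4)) & 0xF;
	}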
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f14e8a2a022d..985d531aaf9e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -139,6 +139,21 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
139 { 0x00004014, 0x00000087 }, 139 { 0x00004014, 0x00000087 },
140}; 140};
141 141
142/* eDP 1.4 low vswing translation parameters */
143static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
144 { 0x00000018, 0x000000a8 },
145 { 0x00002016, 0x000000ab },
146 { 0x00006012, 0x000000a2 },
147 { 0x00008010, 0x00000088 },
148 { 0x00000018, 0x000000ab },
149 { 0x00004014, 0x000000a2 },
150 { 0x00006012, 0x000000a6 },
151 { 0x00000018, 0x000000a2 },
152 { 0x00005013, 0x0000009c },
153 { 0x00000018, 0x00000088 },
154};
155
156
142static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { 157static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
143 /* Idx NT mV T mV db */ 158 /* Idx NT mV T mV db */
144 { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */ 159 { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
@@ -187,7 +202,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
187{ 202{
188 struct drm_i915_private *dev_priv = dev->dev_private; 203 struct drm_i915_private *dev_priv = dev->dev_private;
189 u32 reg; 204 u32 reg;
190 int i, n_hdmi_entries, hdmi_800mV_0dB; 205 int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_800mV_0dB,
206 size;
191 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; 207 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
192 const struct ddi_buf_trans *ddi_translations_fdi; 208 const struct ddi_buf_trans *ddi_translations_fdi;
193 const struct ddi_buf_trans *ddi_translations_dp; 209 const struct ddi_buf_trans *ddi_translations_dp;
@@ -198,7 +214,15 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
198 if (IS_SKYLAKE(dev)) { 214 if (IS_SKYLAKE(dev)) {
199 ddi_translations_fdi = NULL; 215 ddi_translations_fdi = NULL;
200 ddi_translations_dp = skl_ddi_translations_dp; 216 ddi_translations_dp = skl_ddi_translations_dp;
201 ddi_translations_edp = skl_ddi_translations_dp; 217 n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
218 if (dev_priv->vbt.edp_low_vswing) {
219 ddi_translations_edp = skl_ddi_translations_edp;
220 n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
221 } else {
222 ddi_translations_edp = skl_ddi_translations_dp;
223 n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
224 }
225
202 ddi_translations_hdmi = skl_ddi_translations_hdmi; 226 ddi_translations_hdmi = skl_ddi_translations_hdmi;
203 n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); 227 n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
204 hdmi_800mV_0dB = 7; 228 hdmi_800mV_0dB = 7;
@@ -207,6 +231,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
207 ddi_translations_dp = bdw_ddi_translations_dp; 231 ddi_translations_dp = bdw_ddi_translations_dp;
208 ddi_translations_edp = bdw_ddi_translations_edp; 232 ddi_translations_edp = bdw_ddi_translations_edp;
209 ddi_translations_hdmi = bdw_ddi_translations_hdmi; 233 ddi_translations_hdmi = bdw_ddi_translations_hdmi;
234 n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
235 n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
210 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); 236 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
211 hdmi_800mV_0dB = 7; 237 hdmi_800mV_0dB = 7;
212 } else if (IS_HASWELL(dev)) { 238 } else if (IS_HASWELL(dev)) {
@@ -214,6 +240,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
214 ddi_translations_dp = hsw_ddi_translations_dp; 240 ddi_translations_dp = hsw_ddi_translations_dp;
215 ddi_translations_edp = hsw_ddi_translations_dp; 241 ddi_translations_edp = hsw_ddi_translations_dp;
216 ddi_translations_hdmi = hsw_ddi_translations_hdmi; 242 ddi_translations_hdmi = hsw_ddi_translations_hdmi;
243 n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
217 n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); 244 n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
218 hdmi_800mV_0dB = 6; 245 hdmi_800mV_0dB = 6;
219 } else { 246 } else {
@@ -222,6 +249,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
222 ddi_translations_fdi = bdw_ddi_translations_fdi; 249 ddi_translations_fdi = bdw_ddi_translations_fdi;
223 ddi_translations_dp = bdw_ddi_translations_dp; 250 ddi_translations_dp = bdw_ddi_translations_dp;
224 ddi_translations_hdmi = bdw_ddi_translations_hdmi; 251 ddi_translations_hdmi = bdw_ddi_translations_hdmi;
252 n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
253 n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
225 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); 254 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
226 hdmi_800mV_0dB = 7; 255 hdmi_800mV_0dB = 7;
227 } 256 }
@@ -229,29 +258,34 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
229 switch (port) { 258 switch (port) {
230 case PORT_A: 259 case PORT_A:
231 ddi_translations = ddi_translations_edp; 260 ddi_translations = ddi_translations_edp;
261 size = n_edp_entries;
232 break; 262 break;
233 case PORT_B: 263 case PORT_B:
234 case PORT_C: 264 case PORT_C:
235 ddi_translations = ddi_translations_dp; 265 ddi_translations = ddi_translations_dp;
266 size = n_dp_entries;
236 break; 267 break;
237 case PORT_D: 268 case PORT_D:
238 if (intel_dp_is_edp(dev, PORT_D)) 269 if (intel_dp_is_edp(dev, PORT_D)) {
239 ddi_translations = ddi_translations_edp; 270 ddi_translations = ddi_translations_edp;
240 else 271 size = n_edp_entries;
272 } else {
241 ddi_translations = ddi_translations_dp; 273 ddi_translations = ddi_translations_dp;
274 size = n_dp_entries;
275 }
242 break; 276 break;
243 case PORT_E: 277 case PORT_E:
244 if (ddi_translations_fdi) 278 if (ddi_translations_fdi)
245 ddi_translations = ddi_translations_fdi; 279 ddi_translations = ddi_translations_fdi;
246 else 280 else
247 ddi_translations = ddi_translations_dp; 281 ddi_translations = ddi_translations_dp;
282 size = n_dp_entries;
248 break; 283 break;
249 default: 284 default:
250 BUG(); 285 BUG();
251 } 286 }
252 287
253 for (i = 0, reg = DDI_BUF_TRANS(port); 288 for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
254 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
255 I915_WRITE(reg, ddi_translations[i].trans1); 289 I915_WRITE(reg, ddi_translations[i].trans1);
256 reg += 4; 290 reg += 4;
257 I915_WRITE(reg, ddi_translations[i].trans2); 291 I915_WRITE(reg, ddi_translations[i].trans2);
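Each translation table entry is a pair of dwords programmed into
consecutive DDI_BUF_TRANS registers; the fixed ARRAY_SIZE bound of the old
loop is replaced by a per-table size so the shorter tables are not
overrun. A sketch of the full write loop, under the assumption that the
trailing increment matches the pattern shown above:

	u32 reg = DDI_BUF_TRANS(port);
	for (i = 0; i < size; i++) {
		I915_WRITE(reg, ddi_translations[i].trans1);
		reg += 4;
		I915_WRITE(reg, ddi_translations[i].trans2);
		reg += 4;
	}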
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e730789b53b7..31f3b11589b9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -390,7 +390,7 @@ static const intel_limit_t intel_limits_chv = {
390 * them would make no difference. 390 * them would make no difference.
391 */ 391 */
392 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 392 .dot = { .min = 25000 * 5, .max = 540000 * 5},
393 .vco = { .min = 4860000, .max = 6700000 }, 393 .vco = { .min = 4860000, .max = 6480000 },
394 .n = { .min = 1, .max = 1 }, 394 .n = { .min = 1, .max = 1 },
395 .m1 = { .min = 2, .max = 2 }, 395 .m1 = { .min = 2, .max = 2 },
396 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 396 .m2 = { .min = 24 << 22, .max = 175 << 22 },
@@ -2190,11 +2190,50 @@ static bool need_vtd_wa(struct drm_device *dev)
2190} 2190}
2191 2191
2192int 2192int
2193intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling) 2193intel_fb_align_height(struct drm_device *dev, int height,
2194 uint32_t pixel_format,
2195 uint64_t fb_format_modifier)
2194{ 2196{
2195 int tile_height; 2197 int tile_height;
2198 uint32_t bits_per_pixel;
2199
2200 switch (fb_format_modifier) {
2201 case DRM_FORMAT_MOD_NONE:
2202 tile_height = 1;
2203 break;
2204 case I915_FORMAT_MOD_X_TILED:
2205 tile_height = IS_GEN2(dev) ? 16 : 8;
2206 break;
2207 case I915_FORMAT_MOD_Y_TILED:
2208 tile_height = 32;
2209 break;
2210 case I915_FORMAT_MOD_Yf_TILED:
2211 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2212 switch (bits_per_pixel) {
2213 default:
2214 case 8:
2215 tile_height = 64;
2216 break;
2217 case 16:
2218 case 32:
2219 tile_height = 32;
2220 break;
2221 case 64:
2222 tile_height = 16;
2223 break;
2224 case 128:
2225 WARN_ONCE(1,
2226 "128-bit pixels are not supported for display!");
2227 tile_height = 16;
2228 break;
2229 }
2230 break;
2231 default:
2232 MISSING_CASE(fb_format_modifier);
2233 tile_height = 1;
2234 break;
2235 }
2196 2236
2197 tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
2198 return ALIGN(height, tile_height); 2237 return ALIGN(height, tile_height);
2199} 2238}
2200 2239
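A worked example of the new height alignment: for Yf tiling at 32 bpp the
tile height is 32 rows, so a 1080-line framebuffer is padded to 1088
lines. ALIGN() rounds up to the next multiple of a power-of-two value:

	unsigned int h = 1080, tile_height = 32;	/* Yf-tiled, 32 bpp */
	unsigned int aligned = (h + tile_height - 1) & ~(tile_height - 1);
	/* aligned == 1088 */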
@@ -2211,8 +2250,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2211 2250
2212 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2251 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2213 2252
2214 switch (obj->tiling_mode) { 2253 switch (fb->modifier[0]) {
2215 case I915_TILING_NONE: 2254 case DRM_FORMAT_MOD_NONE:
2216 if (INTEL_INFO(dev)->gen >= 9) 2255 if (INTEL_INFO(dev)->gen >= 9)
2217 alignment = 256 * 1024; 2256 alignment = 256 * 1024;
2218 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 2257 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2222,7 +2261,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2222 else 2261 else
2223 alignment = 64 * 1024; 2262 alignment = 64 * 1024;
2224 break; 2263 break;
2225 case I915_TILING_X: 2264 case I915_FORMAT_MOD_X_TILED:
2226 if (INTEL_INFO(dev)->gen >= 9) 2265 if (INTEL_INFO(dev)->gen >= 9)
2227 alignment = 256 * 1024; 2266 alignment = 256 * 1024;
2228 else { 2267 else {
@@ -2230,11 +2269,16 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2230 alignment = 0; 2269 alignment = 0;
2231 } 2270 }
2232 break; 2271 break;
2233 case I915_TILING_Y: 2272 case I915_FORMAT_MOD_Y_TILED:
2234 WARN(1, "Y tiled bo slipped through, driver bug!\n"); 2273 case I915_FORMAT_MOD_Yf_TILED:
2235 return -EINVAL; 2274 if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
2275 "Y tiling bo slipped through, driver bug!\n"))
2276 return -EINVAL;
2277 alignment = 1 * 1024 * 1024;
2278 break;
2236 default: 2279 default:
2237 BUG(); 2280 MISSING_CASE(fb->modifier[0]);
2281 return -EINVAL;
2238 } 2282 }
2239 2283
2240 /* Note that the w/a also requires 64 PTE of padding following the 2284 /* Note that the w/a also requires 64 PTE of padding following the
@@ -2282,7 +2326,7 @@ err_interruptible:
2282 return ret; 2326 return ret;
2283} 2327}
2284 2328
2285void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) 2329static void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2286{ 2330{
2287 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); 2331 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2288 2332
@@ -2371,6 +2415,7 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
2371 struct drm_device *dev = crtc->base.dev; 2415 struct drm_device *dev = crtc->base.dev;
2372 struct drm_i915_gem_object *obj = NULL; 2416 struct drm_i915_gem_object *obj = NULL;
2373 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2417 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2418 struct drm_framebuffer *fb = &plane_config->fb->base;
2374 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 2419 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2375 u32 size_aligned = round_up(plane_config->base + plane_config->size, 2420 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2376 PAGE_SIZE); 2421 PAGE_SIZE);
@@ -2389,16 +2434,18 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
2389 2434
2390 obj->tiling_mode = plane_config->tiling; 2435 obj->tiling_mode = plane_config->tiling;
2391 if (obj->tiling_mode == I915_TILING_X) 2436 if (obj->tiling_mode == I915_TILING_X)
2392 obj->stride = crtc->base.primary->fb->pitches[0]; 2437 obj->stride = fb->pitches[0];
2393 2438
2394 mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format; 2439 mode_cmd.pixel_format = fb->pixel_format;
2395 mode_cmd.width = crtc->base.primary->fb->width; 2440 mode_cmd.width = fb->width;
2396 mode_cmd.height = crtc->base.primary->fb->height; 2441 mode_cmd.height = fb->height;
2397 mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0]; 2442 mode_cmd.pitches[0] = fb->pitches[0];
2443 mode_cmd.modifier[0] = fb->modifier[0];
2444 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2398 2445
2399 mutex_lock(&dev->struct_mutex); 2446 mutex_lock(&dev->struct_mutex);
2400 2447
2401 if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb), 2448 if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2402 &mode_cmd, obj)) { 2449 &mode_cmd, obj)) {
2403 DRM_DEBUG_KMS("intel fb init failed\n"); 2450 DRM_DEBUG_KMS("intel fb init failed\n");
2404 goto out_unref_obj; 2451 goto out_unref_obj;
@@ -2416,6 +2463,20 @@ out_unref_obj:
2416 return false; 2463 return false;
2417} 2464}
2418 2465
2466/* Update plane->state->fb to match plane->fb after driver-internal updates */
2467static void
2468update_state_fb(struct drm_plane *plane)
2469{
2470 if (plane->fb == plane->state->fb)
2471 return;
2472
2473 if (plane->state->fb)
2474 drm_framebuffer_unreference(plane->state->fb);
2475 plane->state->fb = plane->fb;
2476 if (plane->state->fb)
2477 drm_framebuffer_reference(plane->state->fb);
2478}
2479
2419static void 2480static void
2420intel_find_plane_obj(struct intel_crtc *intel_crtc, 2481intel_find_plane_obj(struct intel_crtc *intel_crtc,
2421 struct intel_initial_plane_config *plane_config) 2482 struct intel_initial_plane_config *plane_config)
@@ -2426,14 +2487,20 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
2426 struct intel_crtc *i; 2487 struct intel_crtc *i;
2427 struct drm_i915_gem_object *obj; 2488 struct drm_i915_gem_object *obj;
2428 2489
2429 if (!intel_crtc->base.primary->fb) 2490 if (!plane_config->fb)
2430 return; 2491 return;
2431 2492
2432 if (intel_alloc_plane_obj(intel_crtc, plane_config)) 2493 if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
2494 struct drm_plane *primary = intel_crtc->base.primary;
2495
2496 primary->fb = &plane_config->fb->base;
2497 primary->state->crtc = &intel_crtc->base;
2498 update_state_fb(primary);
2499
2433 return; 2500 return;
2501 }
2434 2502
2435 kfree(intel_crtc->base.primary->fb); 2503 kfree(plane_config->fb);
2436 intel_crtc->base.primary->fb = NULL;
2437 2504
2438 /* 2505 /*
2439 * Failed to alloc the obj, check to see if we should share 2506 * Failed to alloc the obj, check to see if we should share
@@ -2453,15 +2520,20 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
2453 continue; 2520 continue;
2454 2521
2455 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { 2522 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2523 struct drm_plane *primary = intel_crtc->base.primary;
2524
2456 if (obj->tiling_mode != I915_TILING_NONE) 2525 if (obj->tiling_mode != I915_TILING_NONE)
2457 dev_priv->preserve_bios_swizzle = true; 2526 dev_priv->preserve_bios_swizzle = true;
2458 2527
2459 drm_framebuffer_reference(c->primary->fb); 2528 drm_framebuffer_reference(c->primary->fb);
2460 intel_crtc->base.primary->fb = c->primary->fb; 2529 primary->fb = c->primary->fb;
2530 primary->state->crtc = &intel_crtc->base;
2531 update_state_fb(intel_crtc->base.primary);
2461 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); 2532 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2462 break; 2533 break;
2463 } 2534 }
2464 } 2535 }
2536
2465} 2537}
2466 2538
2467static void i9xx_update_primary_plane(struct drm_crtc *crtc, 2539static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2701,6 +2773,40 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2701 POSTING_READ(reg); 2773 POSTING_READ(reg);
2702} 2774}
2703 2775
2776u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2777 uint32_t pixel_format)
2778{
2779 u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2780
2781 /*
2782 * The stride is either expressed as a multiple of 64 bytes
2783 * chunks for linear buffers or in number of tiles for tiled
2784 * buffers.
2785 */
2786 switch (fb_modifier) {
2787 case DRM_FORMAT_MOD_NONE:
2788 return 64;
2789 case I915_FORMAT_MOD_X_TILED:
2790 if (INTEL_INFO(dev)->gen == 2)
2791 return 128;
2792 return 512;
2793 case I915_FORMAT_MOD_Y_TILED:
2794		/* No need to check for old gens with Y tiling, since this is
2795		 * about the display engine and those combinations are rejected
2796		 * before we get here.
2797 */
2798 return 128;
2799 case I915_FORMAT_MOD_Yf_TILED:
2800 if (bits_per_pixel == 8)
2801 return 64;
2802 else
2803 return 128;
2804 default:
2805 MISSING_CASE(fb_modifier);
2806 return 64;
2807 }
2808}
2809
2704static void skylake_update_primary_plane(struct drm_crtc *crtc, 2810static void skylake_update_primary_plane(struct drm_crtc *crtc,
2705 struct drm_framebuffer *fb, 2811 struct drm_framebuffer *fb,
2706 int x, int y) 2812 int x, int y)
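PLANE_STRIDE takes the pitch in the units returned by
intel_fb_stride_alignment(). A worked example (values illustrative): a
3840-pixel-wide, 32 bpp, X-tiled gen9 framebuffer has a 15360-byte pitch
and a 512-byte divisor, so 30 is written to the register:

	u32 pitch = 3840 * 4;			/* 15360 bytes */
	u32 stride_div = 512;			/* X-tile row size in bytes */
	u32 plane_stride = pitch / stride_div;	/* 30, for PLANE_STRIDE */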
@@ -2708,10 +2814,9 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
2708 struct drm_device *dev = crtc->dev; 2814 struct drm_device *dev = crtc->dev;
2709 struct drm_i915_private *dev_priv = dev->dev_private; 2815 struct drm_i915_private *dev_priv = dev->dev_private;
2710 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2816 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2711 struct intel_framebuffer *intel_fb;
2712 struct drm_i915_gem_object *obj; 2817 struct drm_i915_gem_object *obj;
2713 int pipe = intel_crtc->pipe; 2818 int pipe = intel_crtc->pipe;
2714 u32 plane_ctl, stride; 2819 u32 plane_ctl, stride_div;
2715 2820
2716 if (!intel_crtc->primary_enabled) { 2821 if (!intel_crtc->primary_enabled) {
2717 I915_WRITE(PLANE_CTL(pipe, 0), 0); 2822 I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -2755,29 +2860,30 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
2755 BUG(); 2860 BUG();
2756 } 2861 }
2757 2862
2758 intel_fb = to_intel_framebuffer(fb); 2863 switch (fb->modifier[0]) {
2759 obj = intel_fb->obj; 2864 case DRM_FORMAT_MOD_NONE:
2760
2761 /*
2762 * The stride is either expressed as a multiple of 64 bytes chunks for
2763 * linear buffers or in number of tiles for tiled buffers.
2764 */
2765 switch (obj->tiling_mode) {
2766 case I915_TILING_NONE:
2767 stride = fb->pitches[0] >> 6;
2768 break; 2865 break;
2769 case I915_TILING_X: 2866 case I915_FORMAT_MOD_X_TILED:
2770 plane_ctl |= PLANE_CTL_TILED_X; 2867 plane_ctl |= PLANE_CTL_TILED_X;
2771 stride = fb->pitches[0] >> 9; 2868 break;
2869 case I915_FORMAT_MOD_Y_TILED:
2870 plane_ctl |= PLANE_CTL_TILED_Y;
2871 break;
2872 case I915_FORMAT_MOD_Yf_TILED:
2873 plane_ctl |= PLANE_CTL_TILED_YF;
2772 break; 2874 break;
2773 default: 2875 default:
2774 BUG(); 2876 MISSING_CASE(fb->modifier[0]);
2775 } 2877 }
2776 2878
2777 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 2879 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2778 if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) 2880 if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
2779 plane_ctl |= PLANE_CTL_ROTATE_180; 2881 plane_ctl |= PLANE_CTL_ROTATE_180;
2780 2882
2883 obj = intel_fb_obj(fb);
2884 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
2885 fb->pixel_format);
2886
2781 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); 2887 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2782 2888
2783 DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n", 2889 DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
@@ -2790,7 +2896,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
2790 I915_WRITE(PLANE_SIZE(pipe, 0), 2896 I915_WRITE(PLANE_SIZE(pipe, 0),
2791 (intel_crtc->config->pipe_src_h - 1) << 16 | 2897 (intel_crtc->config->pipe_src_h - 1) << 16 |
2792 (intel_crtc->config->pipe_src_w - 1)); 2898 (intel_crtc->config->pipe_src_w - 1));
2793 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 2899 I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
2794 I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj)); 2900 I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2795 2901
2796 POSTING_READ(PLANE_SURF(pipe, 0)); 2902 POSTING_READ(PLANE_SURF(pipe, 0));
@@ -3044,7 +3150,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
3044 3150
3045static bool pipe_has_enabled_pch(struct intel_crtc *crtc) 3151static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
3046{ 3152{
3047 return crtc->base.enabled && crtc->active && 3153 return crtc->base.state->enable && crtc->active &&
3048 crtc->config->has_pch_encoder; 3154 crtc->config->has_pch_encoder;
3049} 3155}
3050 3156
@@ -4182,7 +4288,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
4182 bool reenable_ips = false; 4288 bool reenable_ips = false;
4183 4289
4184 /* The clocks have to be on to load the palette. */ 4290 /* The clocks have to be on to load the palette. */
4185 if (!crtc->enabled || !intel_crtc->active) 4291 if (!crtc->state->enable || !intel_crtc->active)
4186 return; 4292 return;
4187 4293
4188 if (!HAS_PCH_SPLIT(dev_priv->dev)) { 4294 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -4266,11 +4372,10 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4266 struct drm_i915_private *dev_priv = dev->dev_private; 4372 struct drm_i915_private *dev_priv = dev->dev_private;
4267 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4373 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4268 int pipe = intel_crtc->pipe; 4374 int pipe = intel_crtc->pipe;
4269 int plane = intel_crtc->plane;
4270 4375
4271 intel_crtc_wait_for_pending_flips(crtc); 4376 intel_crtc_wait_for_pending_flips(crtc);
4272 4377
4273 if (dev_priv->fbc.plane == plane) 4378 if (dev_priv->fbc.crtc == intel_crtc)
4274 intel_fbc_disable(dev); 4379 intel_fbc_disable(dev);
4275 4380
4276 hsw_disable_ips(intel_crtc); 4381 hsw_disable_ips(intel_crtc);
@@ -4296,7 +4401,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4296 struct intel_encoder *encoder; 4401 struct intel_encoder *encoder;
4297 int pipe = intel_crtc->pipe; 4402 int pipe = intel_crtc->pipe;
4298 4403
4299 WARN_ON(!crtc->enabled); 4404 WARN_ON(!crtc->state->enable);
4300 4405
4301 if (intel_crtc->active) 4406 if (intel_crtc->active)
4302 return; 4407 return;
@@ -4305,7 +4410,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4305 intel_prepare_shared_dpll(intel_crtc); 4410 intel_prepare_shared_dpll(intel_crtc);
4306 4411
4307 if (intel_crtc->config->has_dp_encoder) 4412 if (intel_crtc->config->has_dp_encoder)
4308 intel_dp_set_m_n(intel_crtc); 4413 intel_dp_set_m_n(intel_crtc, M1_N1);
4309 4414
4310 intel_set_pipe_timings(intel_crtc); 4415 intel_set_pipe_timings(intel_crtc);
4311 4416
@@ -4404,7 +4509,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4404 struct intel_encoder *encoder; 4509 struct intel_encoder *encoder;
4405 int pipe = intel_crtc->pipe; 4510 int pipe = intel_crtc->pipe;
4406 4511
4407 WARN_ON(!crtc->enabled); 4512 WARN_ON(!crtc->state->enable);
4408 4513
4409 if (intel_crtc->active) 4514 if (intel_crtc->active)
4410 return; 4515 return;
@@ -4413,7 +4518,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4413 intel_enable_shared_dpll(intel_crtc); 4518 intel_enable_shared_dpll(intel_crtc);
4414 4519
4415 if (intel_crtc->config->has_dp_encoder) 4520 if (intel_crtc->config->has_dp_encoder)
4416 intel_dp_set_m_n(intel_crtc); 4521 intel_dp_set_m_n(intel_crtc, M1_N1);
4417 4522
4418 intel_set_pipe_timings(intel_crtc); 4523 intel_set_pipe_timings(intel_crtc);
4419 4524
@@ -4751,7 +4856,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
4751 for_each_intel_crtc(dev, crtc) { 4856 for_each_intel_crtc(dev, crtc) {
4752 enum intel_display_power_domain domain; 4857 enum intel_display_power_domain domain;
4753 4858
4754 if (!crtc->base.enabled) 4859 if (!crtc->base.state->enable)
4755 continue; 4860 continue;
4756 4861
4757 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base); 4862 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
@@ -4972,7 +5077,7 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
4972 5077
4973 /* disable/enable all currently active pipes while we change cdclk */ 5078 /* disable/enable all currently active pipes while we change cdclk */
4974 for_each_intel_crtc(dev, intel_crtc) 5079 for_each_intel_crtc(dev, intel_crtc)
4975 if (intel_crtc->base.enabled) 5080 if (intel_crtc->base.state->enable)
4976 *prepare_pipes |= (1 << intel_crtc->pipe); 5081 *prepare_pipes |= (1 << intel_crtc->pipe);
4977} 5082}
4978 5083
@@ -5012,7 +5117,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
5012 int pipe = intel_crtc->pipe; 5117 int pipe = intel_crtc->pipe;
5013 bool is_dsi; 5118 bool is_dsi;
5014 5119
5015 WARN_ON(!crtc->enabled); 5120 WARN_ON(!crtc->state->enable);
5016 5121
5017 if (intel_crtc->active) 5122 if (intel_crtc->active)
5018 return; 5123 return;
@@ -5027,7 +5132,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
5027 } 5132 }
5028 5133
5029 if (intel_crtc->config->has_dp_encoder) 5134 if (intel_crtc->config->has_dp_encoder)
5030 intel_dp_set_m_n(intel_crtc); 5135 intel_dp_set_m_n(intel_crtc, M1_N1);
5031 5136
5032 intel_set_pipe_timings(intel_crtc); 5137 intel_set_pipe_timings(intel_crtc);
5033 5138
@@ -5095,7 +5200,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
5095 struct intel_encoder *encoder; 5200 struct intel_encoder *encoder;
5096 int pipe = intel_crtc->pipe; 5201 int pipe = intel_crtc->pipe;
5097 5202
5098 WARN_ON(!crtc->enabled); 5203 WARN_ON(!crtc->state->enable);
5099 5204
5100 if (intel_crtc->active) 5205 if (intel_crtc->active)
5101 return; 5206 return;
@@ -5103,7 +5208,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
5103 i9xx_set_pll_dividers(intel_crtc); 5208 i9xx_set_pll_dividers(intel_crtc);
5104 5209
5105 if (intel_crtc->config->has_dp_encoder) 5210 if (intel_crtc->config->has_dp_encoder)
5106 intel_dp_set_m_n(intel_crtc); 5211 intel_dp_set_m_n(intel_crtc, M1_N1);
5107 5212
5108 intel_set_pipe_timings(intel_crtc); 5213 intel_set_pipe_timings(intel_crtc);
5109 5214
@@ -5294,7 +5399,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
5294 struct drm_i915_private *dev_priv = dev->dev_private; 5399 struct drm_i915_private *dev_priv = dev->dev_private;
5295 5400
5296 /* crtc should still be enabled when we disable it. */ 5401 /* crtc should still be enabled when we disable it. */
5297 WARN_ON(!crtc->enabled); 5402 WARN_ON(!crtc->state->enable);
5298 5403
5299 dev_priv->display.crtc_disable(crtc); 5404 dev_priv->display.crtc_disable(crtc);
5300 dev_priv->display.off(crtc); 5405 dev_priv->display.off(crtc);
@@ -5372,7 +5477,8 @@ static void intel_connector_check_state(struct intel_connector *connector)
5372 5477
5373 crtc = encoder->base.crtc; 5478 crtc = encoder->base.crtc;
5374 5479
5375 I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n"); 5480 I915_STATE_WARN(!crtc->state->enable,
5481 "crtc not enabled\n");
5376 I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); 5482 I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5377 I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe, 5483 I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
5378 "encoder active on the wrong pipe\n"); 5484 "encoder active on the wrong pipe\n");
@@ -5559,7 +5665,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
5559 * - LVDS dual channel mode 5665 * - LVDS dual channel mode
5560 * - Double wide pipe 5666 * - Double wide pipe
5561 */ 5667 */
5562 if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 5668 if ((intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5563 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 5669 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5564 pipe_config->pipe_src_w &= ~1; 5670 pipe_config->pipe_src_w &= ~1;
5565 5671
@@ -5862,7 +5968,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5862 * for gen < 8) and if DRRS is supported (to make sure the 5968 * for gen < 8) and if DRRS is supported (to make sure the
5863 * registers are not unnecessarily accessed). 5969 * registers are not unnecessarily accessed).
5864 */ 5970 */
5865 if (m2_n2 && INTEL_INFO(dev)->gen < 8 && 5971 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
5866 crtc->config->has_drrs) { 5972 crtc->config->has_drrs) {
5867 I915_WRITE(PIPE_DATA_M2(transcoder), 5973 I915_WRITE(PIPE_DATA_M2(transcoder),
5868 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 5974 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
@@ -5878,13 +5984,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5878 } 5984 }
5879} 5985}
5880 5986
5881void intel_dp_set_m_n(struct intel_crtc *crtc) 5987void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
5882{ 5988{
5989 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
5990
5991 if (m_n == M1_N1) {
5992 dp_m_n = &crtc->config->dp_m_n;
5993 dp_m2_n2 = &crtc->config->dp_m2_n2;
5994 } else if (m_n == M2_N2) {
5995
5996 /*
5997	 * M2_N2 registers are not supported, so the m2_n2 divider value
5998	 * needs to be programmed into M1_N1 instead.
5999 */
6000 dp_m_n = &crtc->config->dp_m2_n2;
6001 } else {
6002 DRM_ERROR("Unsupported divider value\n");
6003 return;
6004 }
6005
5883 if (crtc->config->has_pch_encoder) 6006 if (crtc->config->has_pch_encoder)
5884 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 6007 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
5885 else 6008 else
5886 intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n, 6009 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
5887 &crtc->config->dp_m2_n2);
5888} 6010}
5889 6011
5890static void vlv_update_pll(struct intel_crtc *crtc, 6012static void vlv_update_pll(struct intel_crtc *crtc,
@@ -6602,6 +6724,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6602 struct drm_framebuffer *fb; 6724 struct drm_framebuffer *fb;
6603 struct intel_framebuffer *intel_fb; 6725 struct intel_framebuffer *intel_fb;
6604 6726
6727 val = I915_READ(DSPCNTR(plane));
6728 if (!(val & DISPLAY_PLANE_ENABLE))
6729 return;
6730
6605 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 6731 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6606 if (!intel_fb) { 6732 if (!intel_fb) {
6607 DRM_DEBUG_KMS("failed to alloc fb\n"); 6733 DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -6610,11 +6736,12 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6610 6736
6611 fb = &intel_fb->base; 6737 fb = &intel_fb->base;
6612 6738
6613 val = I915_READ(DSPCNTR(plane)); 6739 if (INTEL_INFO(dev)->gen >= 4) {
6614 6740 if (val & DISPPLANE_TILED) {
6615 if (INTEL_INFO(dev)->gen >= 4)
6616 if (val & DISPPLANE_TILED)
6617 plane_config->tiling = I915_TILING_X; 6741 plane_config->tiling = I915_TILING_X;
6742 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
6743 }
6744 }
6618 6745
6619 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 6746 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6620 fourcc = i9xx_format_to_fourcc(pixel_format); 6747 fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -6640,7 +6767,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6640 fb->pitches[0] = val & 0xffffffc0; 6767 fb->pitches[0] = val & 0xffffffc0;
6641 6768
6642 aligned_height = intel_fb_align_height(dev, fb->height, 6769 aligned_height = intel_fb_align_height(dev, fb->height,
6643 plane_config->tiling); 6770 fb->pixel_format,
6771 fb->modifier[0]);
6644 6772
6645 plane_config->size = fb->pitches[0] * aligned_height; 6773 plane_config->size = fb->pitches[0] * aligned_height;
6646 6774
@@ -6649,7 +6777,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6649 fb->bits_per_pixel, base, fb->pitches[0], 6777 fb->bits_per_pixel, base, fb->pitches[0],
6650 plane_config->size); 6778 plane_config->size);
6651 6779
6652 crtc->base.primary->fb = fb; 6780 plane_config->fb = intel_fb;
6653} 6781}
6654 6782
6655static void chv_crtc_clock_get(struct intel_crtc *crtc, 6783static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7627,7 +7755,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7627{ 7755{
7628 struct drm_device *dev = crtc->base.dev; 7756 struct drm_device *dev = crtc->base.dev;
7629 struct drm_i915_private *dev_priv = dev->dev_private; 7757 struct drm_i915_private *dev_priv = dev->dev_private;
7630 u32 val, base, offset, stride_mult; 7758 u32 val, base, offset, stride_mult, tiling;
7631 int pipe = crtc->pipe; 7759 int pipe = crtc->pipe;
7632 int fourcc, pixel_format; 7760 int fourcc, pixel_format;
7633 int aligned_height; 7761 int aligned_height;
@@ -7643,8 +7771,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7643 fb = &intel_fb->base; 7771 fb = &intel_fb->base;
7644 7772
7645 val = I915_READ(PLANE_CTL(pipe, 0)); 7773 val = I915_READ(PLANE_CTL(pipe, 0));
7646 if (val & PLANE_CTL_TILED_MASK) 7774 if (!(val & PLANE_CTL_ENABLE))
7647 plane_config->tiling = I915_TILING_X; 7775 goto error;
7648 7776
7649 pixel_format = val & PLANE_CTL_FORMAT_MASK; 7777 pixel_format = val & PLANE_CTL_FORMAT_MASK;
7650 fourcc = skl_format_to_fourcc(pixel_format, 7778 fourcc = skl_format_to_fourcc(pixel_format,
@@ -7653,6 +7781,26 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7653 fb->pixel_format = fourcc; 7781 fb->pixel_format = fourcc;
7654 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; 7782 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
7655 7783
7784 tiling = val & PLANE_CTL_TILED_MASK;
7785 switch (tiling) {
7786 case PLANE_CTL_TILED_LINEAR:
7787 fb->modifier[0] = DRM_FORMAT_MOD_NONE;
7788 break;
7789 case PLANE_CTL_TILED_X:
7790 plane_config->tiling = I915_TILING_X;
7791 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
7792 break;
7793 case PLANE_CTL_TILED_Y:
7794 fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
7795 break;
7796 case PLANE_CTL_TILED_YF:
7797 fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
7798 break;
7799 default:
7800 MISSING_CASE(tiling);
7801 goto error;
7802 }
7803
7656 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000; 7804 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
7657 plane_config->base = base; 7805 plane_config->base = base;
7658 7806
@@ -7663,21 +7811,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7663 fb->width = ((val >> 0) & 0x1fff) + 1; 7811 fb->width = ((val >> 0) & 0x1fff) + 1;
7664 7812
7665 val = I915_READ(PLANE_STRIDE(pipe, 0)); 7813 val = I915_READ(PLANE_STRIDE(pipe, 0));
7666 switch (plane_config->tiling) { 7814 stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
7667 case I915_TILING_NONE: 7815 fb->pixel_format);
7668 stride_mult = 64;
7669 break;
7670 case I915_TILING_X:
7671 stride_mult = 512;
7672 break;
7673 default:
7674 MISSING_CASE(plane_config->tiling);
7675 goto error;
7676 }
7677 fb->pitches[0] = (val & 0x3ff) * stride_mult; 7816 fb->pitches[0] = (val & 0x3ff) * stride_mult;
7678 7817
7679 aligned_height = intel_fb_align_height(dev, fb->height, 7818 aligned_height = intel_fb_align_height(dev, fb->height,
7680 plane_config->tiling); 7819 fb->pixel_format,
7820 fb->modifier[0]);
7681 7821
7682 plane_config->size = fb->pitches[0] * aligned_height; 7822 plane_config->size = fb->pitches[0] * aligned_height;
7683 7823
@@ -7686,7 +7826,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7686 fb->bits_per_pixel, base, fb->pitches[0], 7826 fb->bits_per_pixel, base, fb->pitches[0],
7687 plane_config->size); 7827 plane_config->size);
7688 7828
7689 crtc->base.primary->fb = fb; 7829 plane_config->fb = intel_fb;
7690 return; 7830 return;
7691 7831
7692error: 7832error:
@@ -7730,6 +7870,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7730 struct drm_framebuffer *fb; 7870 struct drm_framebuffer *fb;
7731 struct intel_framebuffer *intel_fb; 7871 struct intel_framebuffer *intel_fb;
7732 7872
7873 val = I915_READ(DSPCNTR(pipe));
7874 if (!(val & DISPLAY_PLANE_ENABLE))
7875 return;
7876
7733 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 7877 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7734 if (!intel_fb) { 7878 if (!intel_fb) {
7735 DRM_DEBUG_KMS("failed to alloc fb\n"); 7879 DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -7738,11 +7882,12 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7738 7882
7739 fb = &intel_fb->base; 7883 fb = &intel_fb->base;
7740 7884
7741 val = I915_READ(DSPCNTR(pipe)); 7885 if (INTEL_INFO(dev)->gen >= 4) {
7742 7886 if (val & DISPPLANE_TILED) {
7743 if (INTEL_INFO(dev)->gen >= 4)
7744 if (val & DISPPLANE_TILED)
7745 plane_config->tiling = I915_TILING_X; 7887 plane_config->tiling = I915_TILING_X;
7888 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
7889 }
7890 }
7746 7891
7747 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7892 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7748 fourcc = i9xx_format_to_fourcc(pixel_format); 7893 fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -7768,7 +7913,8 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7768 fb->pitches[0] = val & 0xffffffc0; 7913 fb->pitches[0] = val & 0xffffffc0;
7769 7914
7770 aligned_height = intel_fb_align_height(dev, fb->height, 7915 aligned_height = intel_fb_align_height(dev, fb->height,
7771 plane_config->tiling); 7916 fb->pixel_format,
7917 fb->modifier[0]);
7772 7918
7773 plane_config->size = fb->pitches[0] * aligned_height; 7919 plane_config->size = fb->pitches[0] * aligned_height;
7774 7920
@@ -7777,7 +7923,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7777 fb->bits_per_pixel, base, fb->pitches[0], 7923 fb->bits_per_pixel, base, fb->pitches[0],
7778 plane_config->size); 7924 plane_config->size);
7779 7925
7780 crtc->base.primary->fb = fb; 7926 plane_config->fb = intel_fb;
7781} 7927}
7782 7928
7783static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 7929static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
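Both readout paths now return the BIOS framebuffer through plane_config->fb instead of assigning crtc->base.primary->fb themselves, so the decision to take over or share that fb moves to a common caller. A rough sketch of that consumer — the function and helper names (intel_find_plane_obj(), intel_alloc_plane_obj()) are assumptions, and update_state_fb() is the helper introduced further down in this diff:

static void
intel_find_plane_obj(struct intel_crtc *intel_crtc,
		     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_crtc *c;

	if (!plane_config->fb)
		return;

	/* Preferred path: wrap the firmware fb's stolen memory in a GEM
	 * object and keep scanning out of it. */
	if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
		intel_crtc->base.primary->fb = &plane_config->fb->base;
		update_state_fb(intel_crtc->base.primary);
		return;
	}

	kfree(plane_config->fb);

	/* Otherwise another pipe may already be scanning out of the same
	 * address (e.g. a cloned BIOS setup); share its fb. */
	for_each_crtc(dev, c) {
		struct drm_i915_gem_object *obj;

		if (c == &intel_crtc->base || !c->primary->fb)
			continue;

		obj = intel_fb_obj(c->primary->fb);
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(c->primary->fb);
			intel_crtc->base.primary->fb = c->primary->fb;
			update_state_fb(intel_crtc->base.primary);
			break;
		}
	}
}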
@@ -8651,7 +8797,7 @@ retry:
8651 i++; 8797 i++;
8652 if (!(encoder->possible_crtcs & (1 << i))) 8798 if (!(encoder->possible_crtcs & (1 << i)))
8653 continue; 8799 continue;
8654 if (possible_crtc->enabled) 8800 if (possible_crtc->state->enable)
8655 continue; 8801 continue;
8656 /* This can occur when applying the pipe A quirk on resume. */ 8802 /* This can occur when applying the pipe A quirk on resume. */
8657 if (to_intel_crtc(possible_crtc)->new_enabled) 8803 if (to_intel_crtc(possible_crtc)->new_enabled)
@@ -8720,7 +8866,7 @@ retry:
8720 return true; 8866 return true;
8721 8867
8722 fail: 8868 fail:
8723 intel_crtc->new_enabled = crtc->enabled; 8869 intel_crtc->new_enabled = crtc->state->enable;
8724 if (intel_crtc->new_enabled) 8870 if (intel_crtc->new_enabled)
8725 intel_crtc->new_config = intel_crtc->config; 8871 intel_crtc->new_config = intel_crtc->config;
8726 else 8872 else
@@ -9071,9 +9217,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
9071 enum pipe pipe = to_intel_crtc(work->crtc)->pipe; 9217 enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
9072 9218
9073 mutex_lock(&dev->struct_mutex); 9219 mutex_lock(&dev->struct_mutex);
9074 intel_unpin_fb_obj(work->old_fb_obj); 9220 intel_unpin_fb_obj(intel_fb_obj(work->old_fb));
9075 drm_gem_object_unreference(&work->pending_flip_obj->base); 9221 drm_gem_object_unreference(&work->pending_flip_obj->base);
9076 drm_gem_object_unreference(&work->old_fb_obj->base); 9222 drm_framebuffer_unreference(work->old_fb);
9077 9223
9078 intel_fbc_update(dev); 9224 intel_fbc_update(dev);
9079 9225
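The completion handler above now drops a framebuffer reference rather than a GEM object reference, pairing with the reference taken when the flip is queued (shown further down in this diff). Schematically:

/* Queue side, intel_crtc_page_flip(): keep the outgoing fb alive until
 * the flip completes. */
work->old_fb = old_fb;
drm_framebuffer_reference(work->old_fb);
drm_gem_object_reference(&obj->base);

/* Completion side, intel_unpin_work_fn(): unpin the backing object and
 * drop both references taken above. */
intel_unpin_fb_obj(intel_fb_obj(work->old_fb));
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_framebuffer_unreference(work->old_fb);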
@@ -9598,69 +9744,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
9598 return 0; 9744 return 0;
9599} 9745}
9600 9746
9601static int intel_gen9_queue_flip(struct drm_device *dev,
9602 struct drm_crtc *crtc,
9603 struct drm_framebuffer *fb,
9604 struct drm_i915_gem_object *obj,
9605 struct intel_engine_cs *ring,
9606 uint32_t flags)
9607{
9608 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9609 uint32_t plane = 0, stride;
9610 int ret;
9611
9612 switch(intel_crtc->pipe) {
9613 case PIPE_A:
9614 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
9615 break;
9616 case PIPE_B:
9617 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
9618 break;
9619 case PIPE_C:
9620 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
9621 break;
9622 default:
9623 WARN_ONCE(1, "unknown plane in flip command\n");
9624 return -ENODEV;
9625 }
9626
9627 switch (obj->tiling_mode) {
9628 case I915_TILING_NONE:
9629 stride = fb->pitches[0] >> 6;
9630 break;
9631 case I915_TILING_X:
9632 stride = fb->pitches[0] >> 9;
9633 break;
9634 default:
9635 WARN_ONCE(1, "unknown tiling in flip command\n");
9636 return -ENODEV;
9637 }
9638
9639 ret = intel_ring_begin(ring, 10);
9640 if (ret)
9641 return ret;
9642
9643 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9644 intel_ring_emit(ring, DERRMR);
9645 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9646 DERRMR_PIPEB_PRI_FLIP_DONE |
9647 DERRMR_PIPEC_PRI_FLIP_DONE));
9648 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9649 MI_SRM_LRM_GLOBAL_GTT);
9650 intel_ring_emit(ring, DERRMR);
9651 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9652 intel_ring_emit(ring, 0);
9653
9654 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
9655 intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
9656 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9657
9658 intel_mark_page_flip_active(intel_crtc);
9659 __intel_ring_advance(ring);
9660
9661 return 0;
9662}
9663
9664static int intel_default_queue_flip(struct drm_device *dev, 9747static int intel_default_queue_flip(struct drm_device *dev,
9665 struct drm_crtc *crtc, 9748 struct drm_crtc *crtc,
9666 struct drm_framebuffer *fb, 9749 struct drm_framebuffer *fb,
@@ -9690,10 +9773,10 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
9690 !i915_gem_request_completed(work->flip_queued_req, true)) 9773 !i915_gem_request_completed(work->flip_queued_req, true))
9691 return false; 9774 return false;
9692 9775
9693 work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe); 9776 work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
9694 } 9777 }
9695 9778
9696 if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3) 9779 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
9697 return false; 9780 return false;
9698 9781
9699 /* Potential stall - if we see that the flip has happened, 9782 /* Potential stall - if we see that the flip has happened,
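drm_crtc_vblank_count() used here is just the crtc-based wrapper around the pipe-indexed drm_vblank_count(); a sketch of the relation, assuming the drm_irq.c helper of this era:

u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
{
	/* Map the crtc back to its per-device vblank counter slot. */
	return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
}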
@@ -9724,7 +9807,8 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
9724 spin_lock(&dev->event_lock); 9807 spin_lock(&dev->event_lock);
9725 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) { 9808 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
9726 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 9809 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
9727 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 9810 intel_crtc->unpin_work->flip_queued_vblank,
9811 drm_vblank_count(dev, pipe));
9728 page_flip_completed(intel_crtc); 9812 page_flip_completed(intel_crtc);
9729 } 9813 }
9730 spin_unlock(&dev->event_lock); 9814 spin_unlock(&dev->event_lock);
@@ -9776,7 +9860,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9776 9860
9777 work->event = event; 9861 work->event = event;
9778 work->crtc = crtc; 9862 work->crtc = crtc;
9779 work->old_fb_obj = intel_fb_obj(old_fb); 9863 work->old_fb = old_fb;
9780 INIT_WORK(&work->work, intel_unpin_work_fn); 9864 INIT_WORK(&work->work, intel_unpin_work_fn);
9781 9865
9782 ret = drm_crtc_vblank_get(crtc); 9866 ret = drm_crtc_vblank_get(crtc);
@@ -9812,10 +9896,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9812 goto cleanup; 9896 goto cleanup;
9813 9897
9814 /* Reference the objects for the scheduled work. */ 9898 /* Reference the objects for the scheduled work. */
9815 drm_gem_object_reference(&work->old_fb_obj->base); 9899 drm_framebuffer_reference(work->old_fb);
9816 drm_gem_object_reference(&obj->base); 9900 drm_gem_object_reference(&obj->base);
9817 9901
9818 crtc->primary->fb = fb; 9902 crtc->primary->fb = fb;
9903 update_state_fb(crtc->primary);
9819 9904
9820 work->pending_flip_obj = obj; 9905 work->pending_flip_obj = obj;
9821 9906
@@ -9827,7 +9912,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9827 9912
9828 if (IS_VALLEYVIEW(dev)) { 9913 if (IS_VALLEYVIEW(dev)) {
9829 ring = &dev_priv->ring[BCS]; 9914 ring = &dev_priv->ring[BCS];
9830 if (obj->tiling_mode != work->old_fb_obj->tiling_mode) 9915 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
9831 /* vlv: DISPLAY_FLIP fails to change tiling */ 9916 /* vlv: DISPLAY_FLIP fails to change tiling */
9832 ring = NULL; 9917 ring = NULL;
9833 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 9918 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
@@ -9865,10 +9950,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9865 intel_ring_get_request(ring)); 9950 intel_ring_get_request(ring));
9866 } 9951 }
9867 9952
9868 work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe); 9953 work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
9869 work->enable_stall_check = true; 9954 work->enable_stall_check = true;
9870 9955
9871 i915_gem_track_fb(work->old_fb_obj, obj, 9956 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
9872 INTEL_FRONTBUFFER_PRIMARY(pipe)); 9957 INTEL_FRONTBUFFER_PRIMARY(pipe));
9873 9958
9874 intel_fbc_disable(dev); 9959 intel_fbc_disable(dev);
@@ -9884,7 +9969,8 @@ cleanup_unpin:
9884cleanup_pending: 9969cleanup_pending:
9885 atomic_dec(&intel_crtc->unpin_work_count); 9970 atomic_dec(&intel_crtc->unpin_work_count);
9886 crtc->primary->fb = old_fb; 9971 crtc->primary->fb = old_fb;
9887 drm_gem_object_unreference(&work->old_fb_obj->base); 9972 update_state_fb(crtc->primary);
9973 drm_framebuffer_unreference(work->old_fb);
9888 drm_gem_object_unreference(&obj->base); 9974 drm_gem_object_unreference(&obj->base);
9889 mutex_unlock(&dev->struct_mutex); 9975 mutex_unlock(&dev->struct_mutex);
9890 9976
@@ -9940,7 +10026,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9940 } 10026 }
9941 10027
9942 for_each_intel_crtc(dev, crtc) { 10028 for_each_intel_crtc(dev, crtc) {
9943 crtc->new_enabled = crtc->base.enabled; 10029 crtc->new_enabled = crtc->base.state->enable;
9944 10030
9945 if (crtc->new_enabled) 10031 if (crtc->new_enabled)
9946 crtc->new_config = crtc->config; 10032 crtc->new_config = crtc->config;
@@ -9970,6 +10056,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
9970 } 10056 }
9971 10057
9972 for_each_intel_crtc(dev, crtc) { 10058 for_each_intel_crtc(dev, crtc) {
10059 crtc->base.state->enable = crtc->new_enabled;
9973 crtc->base.enabled = crtc->new_enabled; 10060 crtc->base.enabled = crtc->new_enabled;
9974 } 10061 }
9975} 10062}
@@ -10233,6 +10320,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
10233 if (!pipe_config) 10320 if (!pipe_config)
10234 return ERR_PTR(-ENOMEM); 10321 return ERR_PTR(-ENOMEM);
10235 10322
10323 pipe_config->base.crtc = crtc;
10236 drm_mode_copy(&pipe_config->base.adjusted_mode, mode); 10324 drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
10237 drm_mode_copy(&pipe_config->base.mode, mode); 10325 drm_mode_copy(&pipe_config->base.mode, mode);
10238 10326
@@ -10381,7 +10469,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10381 10469
10382 /* Check for pipes that will be enabled/disabled ... */ 10470 /* Check for pipes that will be enabled/disabled ... */
10383 for_each_intel_crtc(dev, intel_crtc) { 10471 for_each_intel_crtc(dev, intel_crtc) {
10384 if (intel_crtc->base.enabled == intel_crtc->new_enabled) 10472 if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
10385 continue; 10473 continue;
10386 10474
10387 if (!intel_crtc->new_enabled) 10475 if (!intel_crtc->new_enabled)
@@ -10456,10 +10544,10 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10456 10544
10457 /* Double check state. */ 10545 /* Double check state. */
10458 for_each_intel_crtc(dev, intel_crtc) { 10546 for_each_intel_crtc(dev, intel_crtc) {
10459 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); 10547 WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
10460 WARN_ON(intel_crtc->new_config && 10548 WARN_ON(intel_crtc->new_config &&
10461 intel_crtc->new_config != intel_crtc->config); 10549 intel_crtc->new_config != intel_crtc->config);
10462 WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config); 10550 WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
10463 } 10551 }
10464 10552
10465 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 10553 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -10846,7 +10934,7 @@ check_crtc_state(struct drm_device *dev)
10846 DRM_DEBUG_KMS("[CRTC:%d]\n", 10934 DRM_DEBUG_KMS("[CRTC:%d]\n",
10847 crtc->base.base.id); 10935 crtc->base.base.id);
10848 10936
10849 I915_STATE_WARN(crtc->active && !crtc->base.enabled, 10937 I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
10850 "active crtc, but not enabled in sw tracking\n"); 10938 "active crtc, but not enabled in sw tracking\n");
10851 10939
10852 for_each_intel_encoder(dev, encoder) { 10940 for_each_intel_encoder(dev, encoder) {
@@ -10860,9 +10948,10 @@ check_crtc_state(struct drm_device *dev)
10860 I915_STATE_WARN(active != crtc->active, 10948 I915_STATE_WARN(active != crtc->active,
10861 "crtc's computed active state doesn't match tracked active state " 10949 "crtc's computed active state doesn't match tracked active state "
10862 "(expected %i, found %i)\n", active, crtc->active); 10950 "(expected %i, found %i)\n", active, crtc->active);
10863 I915_STATE_WARN(enabled != crtc->base.enabled, 10951 I915_STATE_WARN(enabled != crtc->base.state->enable,
10864 "crtc's computed enabled state doesn't match tracked enabled state " 10952 "crtc's computed enabled state doesn't match tracked enabled state "
10865 "(expected %i, found %i)\n", enabled, crtc->base.enabled); 10953 "(expected %i, found %i)\n", enabled,
10954 crtc->base.state->enable);
10866 10955
10867 active = dev_priv->display.get_pipe_config(crtc, 10956 active = dev_priv->display.get_pipe_config(crtc,
10868 &pipe_config); 10957 &pipe_config);
@@ -10926,7 +11015,7 @@ check_shared_dpll_state(struct drm_device *dev)
10926 pll->on, active); 11015 pll->on, active);
10927 11016
10928 for_each_intel_crtc(dev, crtc) { 11017 for_each_intel_crtc(dev, crtc) {
10929 if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll) 11018 if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
10930 enabled_crtcs++; 11019 enabled_crtcs++;
10931 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 11020 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10932 active_crtcs++; 11021 active_crtcs++;
@@ -11112,7 +11201,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11112 intel_crtc_disable(&intel_crtc->base); 11201 intel_crtc_disable(&intel_crtc->base);
11113 11202
11114 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { 11203 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
11115 if (intel_crtc->base.enabled) 11204 if (intel_crtc->base.state->enable)
11116 dev_priv->display.crtc_disable(&intel_crtc->base); 11205 dev_priv->display.crtc_disable(&intel_crtc->base);
11117 } 11206 }
11118 11207
@@ -11168,7 +11257,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11168 11257
11169 /* FIXME: add subpixel order */ 11258 /* FIXME: add subpixel order */
11170done: 11259done:
11171 if (ret && crtc->enabled) 11260 if (ret && crtc->state->enable)
11172 crtc->mode = *saved_mode; 11261 crtc->mode = *saved_mode;
11173 11262
11174 kfree(saved_mode); 11263 kfree(saved_mode);
@@ -11264,7 +11353,7 @@ static int intel_set_config_save_state(struct drm_device *dev,
11264 */ 11353 */
11265 count = 0; 11354 count = 0;
11266 for_each_crtc(dev, crtc) { 11355 for_each_crtc(dev, crtc) {
11267 config->save_crtc_enabled[count++] = crtc->enabled; 11356 config->save_crtc_enabled[count++] = crtc->state->enable;
11268 } 11357 }
11269 11358
11270 count = 0; 11359 count = 0;
@@ -11498,7 +11587,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
11498 } 11587 }
11499 } 11588 }
11500 11589
11501 if (crtc->new_enabled != crtc->base.enabled) { 11590 if (crtc->new_enabled != crtc->base.state->enable) {
11502 DRM_DEBUG_KMS("crtc %sabled, full mode switch\n", 11591 DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
11503 crtc->new_enabled ? "en" : "dis"); 11592 crtc->new_enabled ? "en" : "dis");
11504 config->mode_changed = true; 11593 config->mode_changed = true;
@@ -11803,7 +11892,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
11803 */ 11892 */
11804int 11893int
11805intel_prepare_plane_fb(struct drm_plane *plane, 11894intel_prepare_plane_fb(struct drm_plane *plane,
11806 struct drm_framebuffer *fb) 11895 struct drm_framebuffer *fb,
11896 const struct drm_plane_state *new_state)
11807{ 11897{
11808 struct drm_device *dev = plane->dev; 11898 struct drm_device *dev = plane->dev;
11809 struct intel_plane *intel_plane = to_intel_plane(plane); 11899 struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -11857,7 +11947,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
11857 */ 11947 */
11858void 11948void
11859intel_cleanup_plane_fb(struct drm_plane *plane, 11949intel_cleanup_plane_fb(struct drm_plane *plane,
11860 struct drm_framebuffer *fb) 11950 struct drm_framebuffer *fb,
11951 const struct drm_plane_state *old_state)
11861{ 11952{
11862 struct drm_device *dev = plane->dev; 11953 struct drm_device *dev = plane->dev;
11863 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11954 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -11913,7 +12004,7 @@ intel_check_primary_plane(struct drm_plane *plane,
11913 */ 12004 */
11914 if (intel_crtc->primary_enabled && 12005 if (intel_crtc->primary_enabled &&
11915 INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && 12006 INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11916 dev_priv->fbc.plane == intel_crtc->plane && 12007 dev_priv->fbc.crtc == intel_crtc &&
11917 state->base.rotation != BIT(DRM_ROTATE_0)) { 12008 state->base.rotation != BIT(DRM_ROTATE_0)) {
11918 intel_crtc->atomic.disable_fbc = true; 12009 intel_crtc->atomic.disable_fbc = true;
11919 } 12010 }
@@ -11932,6 +12023,12 @@ intel_check_primary_plane(struct drm_plane *plane,
11932 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); 12023 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
11933 12024
11934 intel_crtc->atomic.update_fbc = true; 12025 intel_crtc->atomic.update_fbc = true;
12026
12027 /* Update watermarks on tiling changes. */
12028 if (!plane->state->fb || !state->base.fb ||
12029 plane->state->fb->modifier[0] !=
12030 state->base.fb->modifier[0])
12031 intel_crtc->atomic.update_wm = true;
11935 } 12032 }
11936 12033
11937 return 0; 12034 return 0;
@@ -12085,8 +12182,8 @@ void intel_plane_destroy(struct drm_plane *plane)
12085} 12182}
12086 12183
12087const struct drm_plane_funcs intel_plane_funcs = { 12184const struct drm_plane_funcs intel_plane_funcs = {
12088 .update_plane = drm_plane_helper_update, 12185 .update_plane = drm_atomic_helper_update_plane,
12089 .disable_plane = drm_plane_helper_disable, 12186 .disable_plane = drm_atomic_helper_disable_plane,
12090 .destroy = intel_plane_destroy, 12187 .destroy = intel_plane_destroy,
12091 .set_property = drm_atomic_helper_plane_set_property, 12188 .set_property = drm_atomic_helper_plane_set_property,
12092 .atomic_get_property = intel_plane_atomic_get_property, 12189 .atomic_get_property = intel_plane_atomic_get_property,
@@ -12204,7 +12301,6 @@ intel_check_cursor_plane(struct drm_plane *plane,
12204 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 12301 DRM_DEBUG_KMS("cursor cannot be tiled\n");
12205 ret = -EINVAL; 12302 ret = -EINVAL;
12206 } 12303 }
12207 mutex_unlock(&dev->struct_mutex);
12208 12304
12209finish: 12305finish:
12210 if (intel_crtc->active) { 12306 if (intel_crtc->active) {
@@ -12322,6 +12418,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
12322 if (!crtc_state) 12418 if (!crtc_state)
12323 goto fail; 12419 goto fail;
12324 intel_crtc_set_state(intel_crtc, crtc_state); 12420 intel_crtc_set_state(intel_crtc, crtc_state);
12421 crtc_state->base.crtc = &intel_crtc->base;
12325 12422
12326 primary = intel_primary_plane_create(dev, pipe); 12423 primary = intel_primary_plane_create(dev, pipe);
12327 if (!primary) 12424 if (!primary)
@@ -12399,9 +12496,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
12399 struct drm_crtc *drmmode_crtc; 12496 struct drm_crtc *drmmode_crtc;
12400 struct intel_crtc *crtc; 12497 struct intel_crtc *crtc;
12401 12498
12402 if (!drm_core_check_feature(dev, DRIVER_MODESET))
12403 return -ENODEV;
12404
12405 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 12499 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
12406 12500
12407 if (!drmmode_crtc) { 12501 if (!drmmode_crtc) {
@@ -12674,52 +12768,100 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
12674 .create_handle = intel_user_framebuffer_create_handle, 12768 .create_handle = intel_user_framebuffer_create_handle,
12675}; 12769};
12676 12770
12771static
12772u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
12773 uint32_t pixel_format)
12774{
12775 u32 gen = INTEL_INFO(dev)->gen;
12776
12777 if (gen >= 9) {
12778 /* "The stride in bytes must not exceed the size of 8K
12779 * pixels and 32K bytes."
12780 */
12781 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
12782 } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
12783 return 32*1024;
12784 } else if (gen >= 4) {
12785 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
12786 return 16*1024;
12787 else
12788 return 32*1024;
12789 } else if (gen >= 3) {
12790 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
12791 return 8*1024;
12792 else
12793 return 16*1024;
12794 } else {
12795 /* XXX DSPC is limited to 4k tiled */
12796 return 8*1024;
12797 }
12798}
12799
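On gen9 the pitch limit is format-dependent, the lesser of an 8K-pixel and a 32K-byte constraint. Worked examples of what the helper above returns there:

/* intel_fb_pitch_limit() on gen9:
 *   XRGB8888 (4 bytes/px): min(8192 * 4, 32768) = 32768 bytes
 *   RGB565   (2 bytes/px): min(8192 * 2, 32768) = 16384 bytes
 *   C8       (1 byte/px):  min(8192 * 1, 32768) =  8192 bytes
 */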
12677static int intel_framebuffer_init(struct drm_device *dev, 12800static int intel_framebuffer_init(struct drm_device *dev,
12678 struct intel_framebuffer *intel_fb, 12801 struct intel_framebuffer *intel_fb,
12679 struct drm_mode_fb_cmd2 *mode_cmd, 12802 struct drm_mode_fb_cmd2 *mode_cmd,
12680 struct drm_i915_gem_object *obj) 12803 struct drm_i915_gem_object *obj)
12681{ 12804{
12682 int aligned_height; 12805 int aligned_height;
12683 int pitch_limit;
12684 int ret; 12806 int ret;
12807 u32 pitch_limit, stride_alignment;
12685 12808
12686 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 12809 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
12687 12810
12688 if (obj->tiling_mode == I915_TILING_Y) { 12811 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
12689 DRM_DEBUG("hardware does not support tiling Y\n"); 12812 /* Enforce that fb modifier and tiling mode match, but only for
12690 return -EINVAL; 12813 * X-tiled. This is needed for FBC. */
12814 if (!!(obj->tiling_mode == I915_TILING_X) !=
12815 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
12816 DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
12817 return -EINVAL;
12818 }
12819 } else {
12820 if (obj->tiling_mode == I915_TILING_X)
12821 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
12822 else if (obj->tiling_mode == I915_TILING_Y) {
12823 DRM_DEBUG("No Y tiling for legacy addfb\n");
12824 return -EINVAL;
12825 }
12691 } 12826 }
12692 12827
12693 if (mode_cmd->pitches[0] & 63) { 12828 /* Sanity-check the passed-in modifier. */
12694 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", 12829 switch (mode_cmd->modifier[0]) {
12695 mode_cmd->pitches[0]); 12830 case I915_FORMAT_MOD_Y_TILED:
12831 case I915_FORMAT_MOD_Yf_TILED:
12832 if (INTEL_INFO(dev)->gen < 9) {
12833 DRM_DEBUG("Unsupported tiling 0x%llx!\n",
12834 mode_cmd->modifier[0]);
12835 return -EINVAL;
12836 }
12837 case DRM_FORMAT_MOD_NONE:
12838 case I915_FORMAT_MOD_X_TILED:
12839 break;
12840 default:
12841 DRM_ERROR("Unsupported fb modifier 0x%llx!\n",
12842 mode_cmd->modifier[0]);
12696 return -EINVAL; 12843 return -EINVAL;
12697 } 12844 }
12698 12845
12699 if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) { 12846 stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
12700 pitch_limit = 32*1024; 12847 mode_cmd->pixel_format);
12701 } else if (INTEL_INFO(dev)->gen >= 4) { 12848 if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
12702 if (obj->tiling_mode) 12849 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
12703 pitch_limit = 16*1024; 12850 mode_cmd->pitches[0], stride_alignment);
12704 else 12851 return -EINVAL;
12705 pitch_limit = 32*1024; 12852 }
12706 } else if (INTEL_INFO(dev)->gen >= 3) {
12707 if (obj->tiling_mode)
12708 pitch_limit = 8*1024;
12709 else
12710 pitch_limit = 16*1024;
12711 } else
12712 /* XXX DSPC is limited to 4k tiled */
12713 pitch_limit = 8*1024;
12714 12853
12854 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
12855 mode_cmd->pixel_format);
12715 if (mode_cmd->pitches[0] > pitch_limit) { 12856 if (mode_cmd->pitches[0] > pitch_limit) {
12716 DRM_DEBUG("%s pitch (%d) must be less than %d\n", 12857 DRM_DEBUG("%s pitch (%u) must be less than %d\n",
12717 obj->tiling_mode ? "tiled" : "linear", 12858 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
12859 "tiled" : "linear",
12718 mode_cmd->pitches[0], pitch_limit); 12860 mode_cmd->pitches[0], pitch_limit);
12719 return -EINVAL; 12861 return -EINVAL;
12720 } 12862 }
12721 12863
12722 if (obj->tiling_mode != I915_TILING_NONE && 12864 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
12723 mode_cmd->pitches[0] != obj->stride) { 12865 mode_cmd->pitches[0] != obj->stride) {
12724 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", 12866 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
12725 mode_cmd->pitches[0], obj->stride); 12867 mode_cmd->pitches[0], obj->stride);
@@ -12774,7 +12916,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
12774 return -EINVAL; 12916 return -EINVAL;
12775 12917
12776 aligned_height = intel_fb_align_height(dev, mode_cmd->height, 12918 aligned_height = intel_fb_align_height(dev, mode_cmd->height,
12777 obj->tiling_mode); 12919 mode_cmd->pixel_format,
12920 mode_cmd->modifier[0]);
12778 /* FIXME drm helper for size checks (especially planar formats)? */ 12921 /* FIXME drm helper for size checks (especially planar formats)? */
12779 if (obj->base.size < aligned_height * mode_cmd->pitches[0]) 12922 if (obj->base.size < aligned_height * mode_cmd->pitches[0])
12780 return -EINVAL; 12923 return -EINVAL;
@@ -12936,9 +13079,6 @@ static void intel_init_display(struct drm_device *dev)
12936 valleyview_modeset_global_resources; 13079 valleyview_modeset_global_resources;
12937 } 13080 }
12938 13081
12939 /* Default just returns -ENODEV to indicate unsupported */
12940 dev_priv->display.queue_flip = intel_default_queue_flip;
12941
12942 switch (INTEL_INFO(dev)->gen) { 13082 switch (INTEL_INFO(dev)->gen) {
12943 case 2: 13083 case 2:
12944 dev_priv->display.queue_flip = intel_gen2_queue_flip; 13084 dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -12961,8 +13101,10 @@ static void intel_init_display(struct drm_device *dev)
12961 dev_priv->display.queue_flip = intel_gen7_queue_flip; 13101 dev_priv->display.queue_flip = intel_gen7_queue_flip;
12962 break; 13102 break;
12963 case 9: 13103 case 9:
12964 dev_priv->display.queue_flip = intel_gen9_queue_flip; 13104 /* Fall through - unsupported since execlist only. */
12965 break; 13105 default:
13106 /* Default just returns -ENODEV to indicate unsupported */
13107 dev_priv->display.queue_flip = intel_default_queue_flip;
12966 } 13108 }
12967 13109
12968 intel_panel_init_backlight_funcs(dev); 13110 intel_panel_init_backlight_funcs(dev);
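With the CS-based gen9 flip removed, Skylake page flips take the MMIO flip path instead: display.queue_flip stays at the -ENODEV default and intel_crtc_page_flip() routes around it. A sketch of the dispatch heuristic, assuming the use_mmio_flip() helper of this era of the driver:

static bool use_mmio_flip(struct intel_engine_cs *ring,
			  struct drm_i915_gem_object *obj)
{
	/* No usable ring at all: MMIO flip is the only option. */
	if (ring == NULL)
		return true;

	if (INTEL_INFO(ring->dev)->gen < 5)
		return false;

	/* The module parameter can force the decision either way. */
	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		/* Execlists cannot use the legacy CS flip commands, which
		 * is why gen9 dropped its queue_flip hook above. */
		return true;

	/* Avoid queueing a CS flip behind rendering on another ring. */
	return ring != i915_gem_request_get_ring(obj->last_read_req);
}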
@@ -13181,6 +13323,8 @@ void intel_modeset_init(struct drm_device *dev)
13181 dev->mode_config.preferred_depth = 24; 13323 dev->mode_config.preferred_depth = 24;
13182 dev->mode_config.prefer_shadow = 1; 13324 dev->mode_config.prefer_shadow = 1;
13183 13325
13326 dev->mode_config.allow_fb_modifiers = true;
13327
13184 dev->mode_config.funcs = &intel_mode_funcs; 13328 dev->mode_config.funcs = &intel_mode_funcs;
13185 13329
13186 intel_init_quirks(dev); 13330 intel_init_quirks(dev);
@@ -13326,11 +13470,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
13326 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 13470 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
13327 13471
13328 /* restore vblank interrupts to correct state */ 13472 /* restore vblank interrupts to correct state */
13473 drm_crtc_vblank_reset(&crtc->base);
13329 if (crtc->active) { 13474 if (crtc->active) {
13330 update_scanline_offset(crtc); 13475 update_scanline_offset(crtc);
13331 drm_vblank_on(dev, crtc->pipe); 13476 drm_crtc_vblank_on(&crtc->base);
13332 } else 13477 }
13333 drm_vblank_off(dev, crtc->pipe);
13334 13478
13335 /* We need to sanitize the plane -> pipe mapping first because this will 13479 /* We need to sanitize the plane -> pipe mapping first because this will
13336 * disable the crtc (and hence change the state) if it is wrong. Note 13480 * disable the crtc (and hence change the state) if it is wrong. Note
@@ -13370,6 +13514,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
13370 } 13514 }
13371 13515
13372 WARN_ON(crtc->active); 13516 WARN_ON(crtc->active);
13517 crtc->base.state->enable = false;
13373 crtc->base.enabled = false; 13518 crtc->base.enabled = false;
13374 } 13519 }
13375 13520
@@ -13386,7 +13531,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
13386 * have active connectors/encoders. */ 13531 * have active connectors/encoders. */
13387 intel_crtc_update_dpms(&crtc->base); 13532 intel_crtc_update_dpms(&crtc->base);
13388 13533
13389 if (crtc->active != crtc->base.enabled) { 13534 if (crtc->active != crtc->base.state->enable) {
13390 struct intel_encoder *encoder; 13535 struct intel_encoder *encoder;
13391 13536
13392 /* This can happen either due to bugs in the get_hw_state 13537 /* This can happen either due to bugs in the get_hw_state
@@ -13394,9 +13539,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
13394 * pipe A quirk. */ 13539 * pipe A quirk. */
13395 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", 13540 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
13396 crtc->base.base.id, 13541 crtc->base.base.id,
13397 crtc->base.enabled ? "enabled" : "disabled", 13542 crtc->base.state->enable ? "enabled" : "disabled",
13398 crtc->active ? "enabled" : "disabled"); 13543 crtc->active ? "enabled" : "disabled");
13399 13544
13545 crtc->base.state->enable = crtc->active;
13400 crtc->base.enabled = crtc->active; 13546 crtc->base.enabled = crtc->active;
13401 13547
13402 /* Because we only establish the connector -> encoder -> 13548 /* Because we only establish the connector -> encoder ->
@@ -13533,6 +13679,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
13533 crtc->active = dev_priv->display.get_pipe_config(crtc, 13679 crtc->active = dev_priv->display.get_pipe_config(crtc,
13534 crtc->config); 13680 crtc->config);
13535 13681
13682 crtc->base.state->enable = crtc->active;
13536 crtc->base.enabled = crtc->active; 13683 crtc->base.enabled = crtc->active;
13537 crtc->primary_enabled = primary_get_hw_state(crtc); 13684 crtc->primary_enabled = primary_get_hw_state(crtc);
13538 13685
@@ -13718,6 +13865,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
13718 to_intel_crtc(c)->pipe); 13865 to_intel_crtc(c)->pipe);
13719 drm_framebuffer_unreference(c->primary->fb); 13866 drm_framebuffer_unreference(c->primary->fb);
13720 c->primary->fb = NULL; 13867 c->primary->fb = NULL;
13868 update_state_fb(c->primary);
13721 } 13869 }
13722 } 13870 }
13723 mutex_unlock(&dev->struct_mutex); 13871 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a74aaf9242b9..d1141d37e205 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -240,7 +240,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
240 return v; 240 return v;
241} 241}
242 242
243void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 243static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
244{ 244{
245 int i; 245 int i;
246 if (dst_bytes > 4) 246 if (dst_bytes > 4)
@@ -2691,11 +2691,14 @@ static uint8_t
2691intel_dp_voltage_max(struct intel_dp *intel_dp) 2691intel_dp_voltage_max(struct intel_dp *intel_dp)
2692{ 2692{
2693 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2693 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2694 struct drm_i915_private *dev_priv = dev->dev_private;
2694 enum port port = dp_to_dig_port(intel_dp)->port; 2695 enum port port = dp_to_dig_port(intel_dp)->port;
2695 2696
2696 if (INTEL_INFO(dev)->gen >= 9) 2697 if (INTEL_INFO(dev)->gen >= 9) {
2698 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2699 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2697 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 2700 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2698 else if (IS_VALLEYVIEW(dev)) 2701 } else if (IS_VALLEYVIEW(dev))
2699 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 2702 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2700 else if (IS_GEN7(dev) && port == PORT_A) 2703 else if (IS_GEN7(dev) && port == PORT_A)
2701 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 2704 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2719,6 +2722,8 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2719 return DP_TRAIN_PRE_EMPH_LEVEL_2; 2722 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2720 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 2723 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2721 return DP_TRAIN_PRE_EMPH_LEVEL_1; 2724 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2725 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2726 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2722 default: 2727 default:
2723 return DP_TRAIN_PRE_EMPH_LEVEL_0; 2728 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2724 } 2729 }
@@ -3201,6 +3206,9 @@ intel_hsw_signal_levels(uint8_t train_set)
3201 return DDI_BUF_TRANS_SELECT(7); 3206 return DDI_BUF_TRANS_SELECT(7);
3202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 3207 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3203 return DDI_BUF_TRANS_SELECT(8); 3208 return DDI_BUF_TRANS_SELECT(8);
3209
3210 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3211 return DDI_BUF_TRANS_SELECT(9);
3204 default: 3212 default:
3205 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 3213 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3206 "0x%x\n", signal_levels); 3214 "0x%x\n", signal_levels);
@@ -3803,7 +3811,7 @@ go_again:
3803 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 3811 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3804 * 4. Check link status on receipt of hot-plug interrupt 3812 * 4. Check link status on receipt of hot-plug interrupt
3805 */ 3813 */
3806void 3814static void
3807intel_dp_check_link_status(struct intel_dp *intel_dp) 3815intel_dp_check_link_status(struct intel_dp *intel_dp)
3808{ 3816{
3809 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3817 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4736,6 +4744,18 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4736 I915_READ(pp_div_reg)); 4744 I915_READ(pp_div_reg));
4737} 4745}
4738 4746
4747/**
4748 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4749 * @dev: DRM device
4750 * @refresh_rate: RR to be programmed
4751 *
4752 * This function gets called when refresh rate (RR) has to be changed from
4753 * one frequency to another. Switches can be between high and low RR
4754 * supported by the panel or to any other RR based on media playback (in
4755 * this case, RR value needs to be passed from user space).
4756 *
4757 * The caller of this function needs to take a lock on dev_priv->drrs.
4758 */
4739static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) 4759static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4740{ 4760{
4741 struct drm_i915_private *dev_priv = dev->dev_private; 4761 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4793,14 +4813,32 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4793 return; 4813 return;
4794 } 4814 }
4795 4815
4796 if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) { 4816 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
4817 switch (index) {
4818 case DRRS_HIGH_RR:
4819 intel_dp_set_m_n(intel_crtc, M1_N1);
4820 break;
4821 case DRRS_LOW_RR:
4822 intel_dp_set_m_n(intel_crtc, M2_N2);
4823 break;
4824 case DRRS_MAX_RR:
4825 default:
4826 DRM_ERROR("Unsupported refreshrate type\n");
4827 }
4828 } else if (INTEL_INFO(dev)->gen > 6) {
4797 reg = PIPECONF(intel_crtc->config->cpu_transcoder); 4829 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
4798 val = I915_READ(reg); 4830 val = I915_READ(reg);
4831
4799 if (index > DRRS_HIGH_RR) { 4832 if (index > DRRS_HIGH_RR) {
4800 val |= PIPECONF_EDP_RR_MODE_SWITCH; 4833 if (IS_VALLEYVIEW(dev))
4801 intel_dp_set_m_n(intel_crtc); 4834 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4835 else
4836 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4802 } else { 4837 } else {
4803 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 4838 if (IS_VALLEYVIEW(dev))
4839 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4840 else
4841 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4804 } 4842 }
4805 I915_WRITE(reg, val); 4843 I915_WRITE(reg, val);
4806 } 4844 }
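Summarizing the refresh-rate switch mechanisms implemented above:

/* RR switch mechanism by platform, as implemented above:
 *   gen8+ (except CHV): reprogram the link M/N dividers, selecting the
 *                       M1_N1 or M2_N2 set via intel_dp_set_m_n()
 *   gen7 / VLV / CHV:   toggle the PIPECONF EDP_RR_MODE_SWITCH bit
 *                       (VLV/CHV use their own bit position)
 */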
@@ -4810,6 +4848,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4810 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate); 4848 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4811} 4849}
4812 4850
4851/**
4852 * intel_edp_drrs_enable - init drrs struct if supported
4853 * @intel_dp: DP struct
4854 *
4855 * Initializes frontbuffer_bits and drrs.dp
4856 */
4813void intel_edp_drrs_enable(struct intel_dp *intel_dp) 4857void intel_edp_drrs_enable(struct intel_dp *intel_dp)
4814{ 4858{
4815 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4859 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4837,6 +4881,11 @@ unlock:
4837 mutex_unlock(&dev_priv->drrs.mutex); 4881 mutex_unlock(&dev_priv->drrs.mutex);
4838} 4882}
4839 4883
4884/**
4885 * intel_edp_drrs_disable - Disable DRRS
4886 * @intel_dp: DP struct
4887 *
4888 */
4840void intel_edp_drrs_disable(struct intel_dp *intel_dp) 4889void intel_edp_drrs_disable(struct intel_dp *intel_dp)
4841{ 4890{
4842 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4891 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4896,6 +4945,17 @@ unlock:
4896 mutex_unlock(&dev_priv->drrs.mutex); 4945 mutex_unlock(&dev_priv->drrs.mutex);
4897} 4946}
4898 4947
4948/**
4949 * intel_edp_drrs_invalidate - Invalidate DRRS
4950 * @dev: DRM device
4951 * @frontbuffer_bits: frontbuffer plane tracking bits
4952 *
4953 * When there is a disturbance on screen (due to cursor movement/time
4954 * update etc), DRRS needs to be invalidated, i.e. we need to switch to
4955 * high RR.
4956 *
4957 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
4958 */
4899void intel_edp_drrs_invalidate(struct drm_device *dev, 4959void intel_edp_drrs_invalidate(struct drm_device *dev,
4900 unsigned frontbuffer_bits) 4960 unsigned frontbuffer_bits)
4901{ 4961{
@@ -4923,6 +4983,17 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
4923 mutex_unlock(&dev_priv->drrs.mutex); 4983 mutex_unlock(&dev_priv->drrs.mutex);
4924} 4984}
4925 4985
4986/**
4987 * intel_edp_drrs_flush - Flush DRRS
4988 * @dev: DRM device
4989 * @frontbuffer_bits: frontbuffer plane tracking bits
4990 *
4991 * When there is no movement on screen, DRRS work can be scheduled.
4992 * This DRRS work is responsible for setting relevant registers after a
4993 * timeout of 1 second.
4994 *
4995 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
4996 */
4926void intel_edp_drrs_flush(struct drm_device *dev, 4997void intel_edp_drrs_flush(struct drm_device *dev,
4927 unsigned frontbuffer_bits) 4998 unsigned frontbuffer_bits)
4928{ 4999{
@@ -4947,6 +5018,56 @@ void intel_edp_drrs_flush(struct drm_device *dev,
4947 mutex_unlock(&dev_priv->drrs.mutex); 5018 mutex_unlock(&dev_priv->drrs.mutex);
4948} 5019}
4949 5020
5021/**
5022 * DOC: Display Refresh Rate Switching (DRRS)
5023 *
5024 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5025 * which enables switching between low and high refresh rates
5026 * dynamically, based on the usage scenario. This feature is applicable
5027 * to internal panels.
5028 *
5029 * Indication that the panel supports DRRS is given by the panel EDID, which
5030 * would list multiple refresh rates for one resolution.
5031 *
5032 * DRRS is of two types: static and seamless.
5033 * Static DRRS involves changing the refresh rate (RR) with a full modeset
5034 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5035 * Seamless DRRS involves changing RR without any visual effect to the user
5036 * and can be used during normal system usage. This is done by programming
5037 * certain registers.
5038 *
5039 * Support for static/seamless DRRS may be indicated in the VBT based on
5040 * inputs from the panel spec.
5041 *
5042 * DRRS saves power by switching to low RR based on usage scenarios.
5043 *
5044 * eDP DRRS:
5045 * The implementation is based on the frontbuffer tracking infrastructure.
5046 * When there is a disturbance on the screen triggered by user activity or a
5047 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5048 * When there is no movement on screen, after a timeout of 1 second, a switch
5049 * to low RR is made.
5050 * For integration with frontbuffer tracking code,
5051 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5052 *
5053 * DRRS can be further extended to support other internal panels and also
5054 * the scenario of video playback wherein RR is set based on the rate
5055 * requested by userspace.
5056 */
5057
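For orientation, the frontbuffer tracking code drives the two entry points above roughly as follows — a simplified sketch with illustrative wrapper names; the real call sites live in the frontbuffer tracking paths:

/* On any CPU write or flip that dirties the frontbuffer: switch back to
 * the high refresh rate immediately. */
static void frontbuffer_invalidate_sketch(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;

	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
}

/* Once rendering and flipping have settled: arm the 1 second timeout
 * that downclocks to the low refresh rate. */
static void frontbuffer_flush_sketch(struct drm_device *dev,
				     unsigned frontbuffer_bits)
{
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}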
5058/**
5059 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5060 * @intel_connector: eDP connector
5061 * @fixed_mode: preferred mode of panel
5062 *
5063 * This function is called only once at driver load to initialize basic
5064 * DRRS state.
5065 *
5066 * Returns:
5067 * Downclock mode if the panel supports it, otherwise NULL.
5068 * DRRS support is determined by the presence of downclock mode (apart
5069 * from VBT setting).
5070 */
4950static struct drm_display_mode * 5071static struct drm_display_mode *
4951intel_dp_drrs_init(struct intel_connector *intel_connector, 5072intel_dp_drrs_init(struct intel_connector *intel_connector,
4952 struct drm_display_mode *fixed_mode) 5073 struct drm_display_mode *fixed_mode)
@@ -4970,7 +5091,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
4970 (dev, fixed_mode, connector); 5091 (dev, fixed_mode, connector);
4971 5092
4972 if (!downclock_mode) { 5093 if (!downclock_mode) {
4973 DRM_DEBUG_KMS("DRRS not supported\n"); 5094 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
4974 return NULL; 5095 return NULL;
4975 } 5096 }
4976 5097
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index eef79ccd0b7c..f4aa849b243e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -258,6 +258,7 @@ struct intel_plane_state {
258}; 258};
259 259
260struct intel_initial_plane_config { 260struct intel_initial_plane_config {
261 struct intel_framebuffer *fb;
261 unsigned int tiling; 262 unsigned int tiling;
262 int size; 263 int size;
263 u32 base; 264 u32 base;
@@ -500,6 +501,7 @@ struct intel_plane_wm_parameters {
500 uint8_t bytes_per_pixel; 501 uint8_t bytes_per_pixel;
501 bool enabled; 502 bool enabled;
502 bool scaled; 503 bool scaled;
504 u64 tiling;
503}; 505};
504 506
505struct intel_plane { 507struct intel_plane {
@@ -592,6 +594,26 @@ struct intel_hdmi {
592struct intel_dp_mst_encoder; 594struct intel_dp_mst_encoder;
593#define DP_MAX_DOWNSTREAM_PORTS 0x10 595#define DP_MAX_DOWNSTREAM_PORTS 0x10
594 596
597/*
598 * enum link_m_n_set:
599 * When the platform provides two sets of M_N registers for dp, we can
600 * program them and switch between them in case of DRRS.
601 * But when only one such register is provided, we have to program the
602 * required divider value on that register itself based on the DRRS state.
603 *
604 * M1_N1 : Program dp_m_n on M1_N1 registers
605 * dp_m2_n2 on M2_N2 registers (if supported)
606 *
607 * M2_N2 : Program dp_m2_n2 on M1_N1 registers
608 * M2_N2 registers are not supported
609 */
610
611enum link_m_n_set {
612 /* Sets the m1_n1 and m2_n2 */
613 M1_N1 = 0,
614 M2_N2
615};
616
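A sketch of how intel_dp_set_m_n() can act on this enum, matching the comment above; the transcoder-level helpers named here are assumptions:

void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_crtc_state *config = crtc->config;
	struct intel_link_m_n *dp_m_n = &config->dp_m_n;
	struct intel_link_m_n *dp_m2_n2 = &config->dp_m2_n2;

	if (m_n == M2_N2) {
		/* Only one register set available: program the M2/N2
		 * divider values onto the M1/N1 registers themselves. */
		dp_m_n = &config->dp_m2_n2;
		dp_m2_n2 = NULL;
	}

	if (config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}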
595struct intel_dp { 617struct intel_dp {
596 uint32_t output_reg; 618 uint32_t output_reg;
597 uint32_t aux_ch_ctl_reg; 619 uint32_t aux_ch_ctl_reg;
@@ -710,7 +732,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
710struct intel_unpin_work { 732struct intel_unpin_work {
711 struct work_struct work; 733 struct work_struct work;
712 struct drm_crtc *crtc; 734 struct drm_crtc *crtc;
713 struct drm_i915_gem_object *old_fb_obj; 735 struct drm_framebuffer *old_fb;
714 struct drm_i915_gem_object *pending_flip_obj; 736 struct drm_i915_gem_object *pending_flip_obj;
715 struct drm_pending_vblank_event *event; 737 struct drm_pending_vblank_event *event;
716 atomic_t pending; 738 atomic_t pending;
@@ -878,9 +900,12 @@ void intel_frontbuffer_flip(struct drm_device *dev,
878} 900}
879 901
880int intel_fb_align_height(struct drm_device *dev, int height, 902int intel_fb_align_height(struct drm_device *dev, int height,
881 unsigned int tiling); 903 uint32_t pixel_format,
904 uint64_t fb_format_modifier);
882void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); 905void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
883 906
907u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
908 uint32_t pixel_format);
884 909
885/* intel_audio.c */ 910/* intel_audio.c */
886void intel_init_audio(struct drm_device *dev); 911void intel_init_audio(struct drm_device *dev);
@@ -932,7 +957,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
932int intel_pin_and_fence_fb_obj(struct drm_plane *plane, 957int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
933 struct drm_framebuffer *fb, 958 struct drm_framebuffer *fb,
934 struct intel_engine_cs *pipelined); 959 struct intel_engine_cs *pipelined);
935void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
936struct drm_framebuffer * 960struct drm_framebuffer *
937__intel_framebuffer_create(struct drm_device *dev, 961__intel_framebuffer_create(struct drm_device *dev,
938 struct drm_mode_fb_cmd2 *mode_cmd, 962 struct drm_mode_fb_cmd2 *mode_cmd,
@@ -942,9 +966,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe);
942void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 966void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
943void intel_check_page_flip(struct drm_device *dev, int pipe); 967void intel_check_page_flip(struct drm_device *dev, int pipe);
944int intel_prepare_plane_fb(struct drm_plane *plane, 968int intel_prepare_plane_fb(struct drm_plane *plane,
945 struct drm_framebuffer *fb); 969 struct drm_framebuffer *fb,
970 const struct drm_plane_state *new_state);
946void intel_cleanup_plane_fb(struct drm_plane *plane, 971void intel_cleanup_plane_fb(struct drm_plane *plane,
947 struct drm_framebuffer *fb); 972 struct drm_framebuffer *fb,
973 const struct drm_plane_state *old_state);
948int intel_plane_atomic_get_property(struct drm_plane *plane, 974int intel_plane_atomic_get_property(struct drm_plane *plane,
949 const struct drm_plane_state *state, 975 const struct drm_plane_state *state,
950 struct drm_property *property, 976 struct drm_property *property,
@@ -993,7 +1019,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
993void hsw_disable_pc8(struct drm_i915_private *dev_priv); 1019void hsw_disable_pc8(struct drm_i915_private *dev_priv);
994void intel_dp_get_m_n(struct intel_crtc *crtc, 1020void intel_dp_get_m_n(struct intel_crtc *crtc,
995 struct intel_crtc_state *pipe_config); 1021 struct intel_crtc_state *pipe_config);
996void intel_dp_set_m_n(struct intel_crtc *crtc); 1022void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
997int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 1023int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
998void 1024void
999ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config, 1025ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
@@ -1017,7 +1043,6 @@ void intel_dp_complete_link_train(struct intel_dp *intel_dp);
1017void intel_dp_stop_link_train(struct intel_dp *intel_dp); 1043void intel_dp_stop_link_train(struct intel_dp *intel_dp);
1018void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 1044void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
1019void intel_dp_encoder_destroy(struct drm_encoder *encoder); 1045void intel_dp_encoder_destroy(struct drm_encoder *encoder);
1020void intel_dp_check_link_status(struct intel_dp *intel_dp);
1021int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); 1046int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
1022bool intel_dp_compute_config(struct intel_encoder *encoder, 1047bool intel_dp_compute_config(struct intel_encoder *encoder,
1023 struct intel_crtc_state *pipe_config); 1048 struct intel_crtc_state *pipe_config);
@@ -1036,13 +1061,6 @@ int intel_dp_max_link_bw(struct intel_dp *intel_dp);
1036void intel_dp_hot_plug(struct intel_encoder *intel_encoder); 1061void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
1037void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); 1062void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
1038uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); 1063uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
1039void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
1040int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1041 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
1042 unsigned int crtc_w, unsigned int crtc_h,
1043 uint32_t src_x, uint32_t src_y,
1044 uint32_t src_w, uint32_t src_h);
1045int intel_disable_plane(struct drm_plane *plane);
1046void intel_plane_destroy(struct drm_plane *plane); 1064void intel_plane_destroy(struct drm_plane *plane);
1047void intel_edp_drrs_enable(struct intel_dp *intel_dp); 1065void intel_edp_drrs_enable(struct intel_dp *intel_dp);
1048void intel_edp_drrs_disable(struct intel_dp *intel_dp); 1066void intel_edp_drrs_disable(struct intel_dp *intel_dp);
@@ -1231,9 +1249,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
1231int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); 1249int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
1232void intel_flush_primary_plane(struct drm_i915_private *dev_priv, 1250void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
1233 enum plane plane); 1251 enum plane plane);
1234int intel_plane_set_property(struct drm_plane *plane,
1235 struct drm_property *prop,
1236 uint64_t val);
1237int intel_plane_restore(struct drm_plane *plane); 1252int intel_plane_restore(struct drm_plane *plane);
1238int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 1253int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1239 struct drm_file *file_priv); 1254 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 10ab68457ca8..c8c8b24e300c 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -854,7 +854,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
854 854
855 855
856 /* recovery disables */ 856 /* recovery disables */
857 I915_WRITE(MIPI_EOT_DISABLE(port), val); 857 I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
858 858
859 /* in terms of low power clock */ 859 /* in terms of low power clock */
860 I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count); 860 I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
deleted file mode 100644
index 886779030f1a..000000000000
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Jani Nikula <jani.nikula@intel.com>
24 */
25
26#ifndef _INTEL_DSI_DSI_H
27#define _INTEL_DSI_DSI_H
28
29#include <drm/drmP.h>
30#include <drm/drm_crtc.h>
31#include <video/mipi_display.h>
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include "intel_dsi.h"
35
36void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
37 enum port port);
38
39#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 624d1d92d284..618f7bdab0ba 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -78,7 +78,8 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
78 78
79 dev_priv->fbc.enabled = true; 79 dev_priv->fbc.enabled = true;
80 80
81 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; 81 /* Note: fbc.threshold == 1 for i8xx */
82 cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
82 if (fb->pitches[0] < cfb_pitch) 83 if (fb->pitches[0] < cfb_pitch)
83 cfb_pitch = fb->pitches[0]; 84 cfb_pitch = fb->pitches[0];
84 85
@@ -368,7 +369,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
368 if (work->crtc->primary->fb == work->fb) { 369 if (work->crtc->primary->fb == work->fb) {
369 dev_priv->display.enable_fbc(work->crtc); 370 dev_priv->display.enable_fbc(work->crtc);
370 371
371 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; 372 dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
372 dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id; 373 dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
373 dev_priv->fbc.y = work->crtc->y; 374 dev_priv->fbc.y = work->crtc->y;
374 } 375 }
@@ -459,7 +460,7 @@ void intel_fbc_disable(struct drm_device *dev)
459 return; 460 return;
460 461
461 dev_priv->display.disable_fbc(dev); 462 dev_priv->display.disable_fbc(dev);
462 dev_priv->fbc.plane = -1; 463 dev_priv->fbc.crtc = NULL;
463} 464}
464 465
465static bool set_no_fbc_reason(struct drm_i915_private *dev_priv, 466static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
@@ -472,6 +473,43 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
472 return true; 473 return true;
473} 474}
474 475
476static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
477{
478 struct drm_crtc *crtc = NULL, *tmp_crtc;
479 enum pipe pipe;
480 bool pipe_a_only = false, one_pipe_only = false;
481
482 if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
483 pipe_a_only = true;
484 else if (INTEL_INFO(dev_priv)->gen <= 4)
485 one_pipe_only = true;
486
487 for_each_pipe(dev_priv, pipe) {
488 tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
489
490 if (intel_crtc_active(tmp_crtc) &&
491 to_intel_crtc(tmp_crtc)->primary_enabled) {
492 if (one_pipe_only && crtc) {
493 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
494 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
495 return NULL;
496 }
497 crtc = tmp_crtc;
498 }
499
500 if (pipe_a_only)
501 break;
502 }
503
504 if (!crtc || crtc->primary->fb == NULL) {
505 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
506 DRM_DEBUG_KMS("no output, disabling\n");
507 return NULL;
508 }
509
510 return crtc;
511}
512
475/** 513/**
476 * intel_fbc_update - enable/disable FBC as needed 514 * intel_fbc_update - enable/disable FBC as needed
477 * @dev: the drm_device 515 * @dev: the drm_device
@@ -494,22 +532,30 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
494void intel_fbc_update(struct drm_device *dev) 532void intel_fbc_update(struct drm_device *dev)
495{ 533{
496 struct drm_i915_private *dev_priv = dev->dev_private; 534 struct drm_i915_private *dev_priv = dev->dev_private;
497 struct drm_crtc *crtc = NULL, *tmp_crtc; 535 struct drm_crtc *crtc = NULL;
498 struct intel_crtc *intel_crtc; 536 struct intel_crtc *intel_crtc;
499 struct drm_framebuffer *fb; 537 struct drm_framebuffer *fb;
500 struct drm_i915_gem_object *obj; 538 struct drm_i915_gem_object *obj;
501 const struct drm_display_mode *adjusted_mode; 539 const struct drm_display_mode *adjusted_mode;
502 unsigned int max_width, max_height; 540 unsigned int max_width, max_height;
503 541
504 if (!HAS_FBC(dev)) { 542 if (!HAS_FBC(dev))
505 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
506 return; 543 return;
544
545 /* disable framebuffer compression in vGPU */
546 if (intel_vgpu_active(dev))
547 i915.enable_fbc = 0;
548
549 if (i915.enable_fbc < 0) {
550 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
551 DRM_DEBUG_KMS("disabled per chip default\n");
552 goto out_disable;
507 } 553 }
508 554
509 if (!i915.powersave) { 555 if (!i915.enable_fbc || !i915.powersave) {
510 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) 556 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
511 DRM_DEBUG_KMS("fbc disabled per module param\n"); 557 DRM_DEBUG_KMS("fbc disabled per module param\n");
512 return; 558 goto out_disable;
513 } 559 }
514 560
515 /* 561 /*
@@ -521,39 +567,15 @@ void intel_fbc_update(struct drm_device *dev)
521 * - new fb is too large to fit in compressed buffer 567 * - new fb is too large to fit in compressed buffer
522 * - going to an unsupported config (interlace, pixel multiply, etc.) 568 * - going to an unsupported config (interlace, pixel multiply, etc.)
523 */ 569 */
524 for_each_crtc(dev, tmp_crtc) { 570 crtc = intel_fbc_find_crtc(dev_priv);
525 if (intel_crtc_active(tmp_crtc) && 571 if (!crtc)
526 to_intel_crtc(tmp_crtc)->primary_enabled) {
527 if (crtc) {
528 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
529 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
530 goto out_disable;
531 }
532 crtc = tmp_crtc;
533 }
534 }
535
536 if (!crtc || crtc->primary->fb == NULL) {
537 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
538 DRM_DEBUG_KMS("no output, disabling\n");
539 goto out_disable; 572 goto out_disable;
540 }
541 573
542 intel_crtc = to_intel_crtc(crtc); 574 intel_crtc = to_intel_crtc(crtc);
543 fb = crtc->primary->fb; 575 fb = crtc->primary->fb;
544 obj = intel_fb_obj(fb); 576 obj = intel_fb_obj(fb);
545 adjusted_mode = &intel_crtc->config->base.adjusted_mode; 577 adjusted_mode = &intel_crtc->config->base.adjusted_mode;
546 578
547 if (i915.enable_fbc < 0) {
548 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
549 DRM_DEBUG_KMS("disabled per chip default\n");
550 goto out_disable;
551 }
552 if (!i915.enable_fbc) {
553 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
554 DRM_DEBUG_KMS("fbc disabled per module param\n");
555 goto out_disable;
556 }
557 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || 579 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
558 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 580 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
559 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) 581 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
@@ -617,7 +639,7 @@ void intel_fbc_update(struct drm_device *dev)
617 * cannot be unpinned (and have its GTT offset and fence revoked) 639 * cannot be unpinned (and have its GTT offset and fence revoked)
618 * without first being decoupled from the scanout and FBC disabled. 640 * without first being decoupled from the scanout and FBC disabled.
619 */ 641 */
620 if (dev_priv->fbc.plane == intel_crtc->plane && 642 if (dev_priv->fbc.crtc == intel_crtc &&
621 dev_priv->fbc.fb_id == fb->base.id && 643 dev_priv->fbc.fb_id == fb->base.id &&
622 dev_priv->fbc.y == crtc->y) 644 dev_priv->fbc.y == crtc->y)
623 return; 645 return;
@@ -673,6 +695,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
673{ 695{
674 if (!HAS_FBC(dev_priv)) { 696 if (!HAS_FBC(dev_priv)) {
675 dev_priv->fbc.enabled = false; 697 dev_priv->fbc.enabled = false;
698 dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
676 return; 699 return;
677 } 700 }
678 701
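
The intel_fbc.c changes above do three things: cfb_pitch is now derived
from fbc.uncompressed_size, the per-plane bookkeeping (fbc.plane) becomes
a pointer to the owning CRTC (fbc.crtc), and CRTC selection moves into a
new intel_fbc_find_crtc() helper while the module-parameter, vGPU and
chip-default checks are hoisted ahead of the scan. The policy the helper
encodes: Haswell and gen8+ can only compress pipe A, and gen4 and earlier
must give up when more than one pipe is active. A self-contained sketch of
that selection policy, with stand-in types rather than the driver's real
ones:

    #include <stdbool.h>
    #include <stdio.h>

    struct crtc { bool active; bool primary_enabled; bool has_fb; };

    /* Returns the chosen pipe index, or -1 if FBC must stay off. */
    static int fbc_pick_pipe(const struct crtc *pipes, int npipes,
                             bool pipe_a_only, bool one_pipe_only)
    {
        int chosen = -1;

        for (int i = 0; i < npipes; i++) {
            if (pipes[i].active && pipes[i].primary_enabled) {
                if (one_pipe_only && chosen >= 0)
                    return -1;      /* more than one active pipe */
                chosen = i;
            }
            if (pipe_a_only)
                break;              /* HSW and gen8+: only pipe A */
        }

        if (chosen < 0 || !pipes[chosen].has_fb)
            return -1;              /* no usable output */
        return chosen;
    }

    int main(void)
    {
        struct crtc pipes[3] = { { true, true, true } };
        printf("picked pipe %d\n", fbc_pick_pipe(pipes, 3, true, false));
        return 0;
    }
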
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 3001a8674611..234a699b8219 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -594,7 +594,8 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
594 594
595 cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay; 595 cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
596 cur_size = intel_fb_align_height(dev, cur_size, 596 cur_size = intel_fb_align_height(dev, cur_size,
597 plane_config->tiling); 597 fb->base.pixel_format,
598 fb->base.modifier[0]);
598 cur_size *= fb->base.pitches[0]; 599 cur_size *= fb->base.pitches[0];
599 DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", 600 DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
600 pipe_name(intel_crtc->pipe), 601 pipe_name(intel_crtc->pipe),
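
With the plane_config tiling flag gone, intel_fb_align_height() takes the
framebuffer's pixel format and modifier, so the size estimate for the
BIOS-inherited framebuffer follows the new fb-modifier plumbing. The
underlying operation is a round-up to whole tile rows; a minimal sketch
with an assumed 8-row tile height standing in for the driver's
format/modifier tables:

    #include <stdint.h>
    #include <stdio.h>

    /* Round a scanout height up to a whole number of tile rows. */
    static uint32_t align_height(uint32_t height, uint32_t tile_height)
    {
        return (height + tile_height - 1) / tile_height * tile_height;
    }

    int main(void)
    {
        /* with 8-line tiles: 1080 stays 1080, 1081 rounds to 1088 */
        printf("%u %u\n", align_height(1080, 8), align_height(1081, 8));
        return 0;
    }
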
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e8d3da9f3373..fcb074bd55dc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -254,8 +254,10 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
254 return lrca >> 12; 254 return lrca >> 12;
255} 255}
256 256
257static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj) 257static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
258 struct drm_i915_gem_object *ctx_obj)
258{ 259{
260 struct drm_device *dev = ring->dev;
259 uint64_t desc; 261 uint64_t desc;
260 uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj); 262 uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
261 263
@@ -272,6 +274,13 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
272 * signalling between Command Streamers */ 274 * signalling between Command Streamers */
273 /* desc |= GEN8_CTX_FORCE_RESTORE; */ 275 /* desc |= GEN8_CTX_FORCE_RESTORE; */
274 276
277 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
278 if (IS_GEN9(dev) &&
279 INTEL_REVID(dev) <= SKL_REVID_B0 &&
280 (ring->id == BCS || ring->id == VCS ||
281 ring->id == VECS || ring->id == VCS2))
282 desc |= GEN8_CTX_FORCE_RESTORE;
283
275 return desc; 284 return desc;
276} 285}
277 286
@@ -286,13 +295,13 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
286 295
287 /* XXX: You must always write both descriptors in the order below. */ 296 /* XXX: You must always write both descriptors in the order below. */
288 if (ctx_obj1) 297 if (ctx_obj1)
289 temp = execlists_ctx_descriptor(ctx_obj1); 298 temp = execlists_ctx_descriptor(ring, ctx_obj1);
290 else 299 else
291 temp = 0; 300 temp = 0;
292 desc[1] = (u32)(temp >> 32); 301 desc[1] = (u32)(temp >> 32);
293 desc[0] = (u32)temp; 302 desc[0] = (u32)temp;
294 303
295 temp = execlists_ctx_descriptor(ctx_obj0); 304 temp = execlists_ctx_descriptor(ring, ctx_obj0);
296 desc[3] = (u32)(temp >> 32); 305 desc[3] = (u32)(temp >> 32);
297 desc[2] = (u32)temp; 306 desc[2] = (u32)temp;
298 307
@@ -612,7 +621,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
612 * @vmas: list of vmas. 621 * @vmas: list of vmas.
613 * @batch_obj: the batchbuffer to submit. 622 * @batch_obj: the batchbuffer to submit.
614 * @exec_start: batchbuffer start virtual address pointer. 623 * @exec_start: batchbuffer start virtual address pointer.
615 * @flags: translated execbuffer call flags. 624 * @dispatch_flags: translated execbuffer call flags.
616 * 625 *
617 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts 626 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
618 * away the submission details of the execbuffer ioctl call. 627 * away the submission details of the execbuffer ioctl call.
@@ -625,7 +634,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
625 struct drm_i915_gem_execbuffer2 *args, 634 struct drm_i915_gem_execbuffer2 *args,
626 struct list_head *vmas, 635 struct list_head *vmas,
627 struct drm_i915_gem_object *batch_obj, 636 struct drm_i915_gem_object *batch_obj,
628 u64 exec_start, u32 flags) 637 u64 exec_start, u32 dispatch_flags)
629{ 638{
630 struct drm_i915_private *dev_priv = dev->dev_private; 639 struct drm_i915_private *dev_priv = dev->dev_private;
631 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; 640 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
@@ -698,10 +707,12 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
698 dev_priv->relative_constants_mode = instp_mode; 707 dev_priv->relative_constants_mode = instp_mode;
699 } 708 }
700 709
701 ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags); 710 ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
702 if (ret) 711 if (ret)
703 return ret; 712 return ret;
704 713
714 trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
715
705 i915_gem_execbuffer_move_to_active(vmas, ring); 716 i915_gem_execbuffer_move_to_active(vmas, ring);
706 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 717 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
707 718
@@ -776,7 +787,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
776 return 0; 787 return 0;
777} 788}
778 789
779/** 790/*
780 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload 791 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
781 * @ringbuf: Logical Ringbuffer to advance. 792 * @ringbuf: Logical Ringbuffer to advance.
782 * 793 *
@@ -785,9 +796,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
785 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that 796 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
786 * point, the tail *inside* the context is updated and the ELSP written to. 797 * point, the tail *inside* the context is updated and the ELSP written to.
787 */ 798 */
788void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf, 799static void
789 struct intel_context *ctx, 800intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
790 struct drm_i915_gem_request *request) 801 struct intel_context *ctx,
802 struct drm_i915_gem_request *request)
791{ 803{
792 struct intel_engine_cs *ring = ringbuf->ring; 804 struct intel_engine_cs *ring = ringbuf->ring;
793 805
@@ -876,12 +888,9 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
876 return ret; 888 return ret;
877 } 889 }
878 890
879 /* Hold a reference to the context this request belongs to
880 * (we will need it when the time comes to emit/retire the
881 * request).
882 */
883 request->ctx = ctx; 891 request->ctx = ctx;
884 i915_gem_context_reference(request->ctx); 892 i915_gem_context_reference(request->ctx);
893 request->ringbuf = ctx->engine[ring->id].ringbuf;
885 894
886 ring->outstanding_lazy_request = request; 895 ring->outstanding_lazy_request = request;
887 return 0; 896 return 0;
@@ -1140,11 +1149,22 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
1140 return init_workarounds_ring(ring); 1149 return init_workarounds_ring(ring);
1141} 1150}
1142 1151
1152static int gen9_init_render_ring(struct intel_engine_cs *ring)
1153{
1154 int ret;
1155
1156 ret = gen8_init_common_ring(ring);
1157 if (ret)
1158 return ret;
1159
1160 return init_workarounds_ring(ring);
1161}
1162
1143static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf, 1163static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
1144 struct intel_context *ctx, 1164 struct intel_context *ctx,
1145 u64 offset, unsigned flags) 1165 u64 offset, unsigned dispatch_flags)
1146{ 1166{
1147 bool ppgtt = !(flags & I915_DISPATCH_SECURE); 1167 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1148 int ret; 1168 int ret;
1149 1169
1150 ret = intel_logical_ring_begin(ringbuf, ctx, 4); 1170 ret = intel_logical_ring_begin(ringbuf, ctx, 4);
@@ -1316,6 +1336,39 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
1316 return 0; 1336 return 0;
1317} 1337}
1318 1338
1339static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
1340 struct intel_context *ctx)
1341{
1342 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
1343 struct render_state so;
1344 struct drm_i915_file_private *file_priv = ctx->file_priv;
1345 struct drm_file *file = file_priv ? file_priv->file : NULL;
1346 int ret;
1347
1348 ret = i915_gem_render_state_prepare(ring, &so);
1349 if (ret)
1350 return ret;
1351
1352 if (so.rodata == NULL)
1353 return 0;
1354
1355 ret = ring->emit_bb_start(ringbuf,
1356 ctx,
1357 so.ggtt_offset,
1358 I915_DISPATCH_SECURE);
1359 if (ret)
1360 goto out;
1361
1362 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
1363
1364 ret = __i915_add_request(ring, file, so.obj);
1365 /* intel_logical_ring_add_request moves object to inactive if it
1366 * fails */
1367out:
1368 i915_gem_render_state_fini(&so);
1369 return ret;
1370}
1371
1319static int gen8_init_rcs_context(struct intel_engine_cs *ring, 1372static int gen8_init_rcs_context(struct intel_engine_cs *ring,
1320 struct intel_context *ctx) 1373 struct intel_context *ctx)
1321{ 1374{
@@ -1399,7 +1452,10 @@ static int logical_render_ring_init(struct drm_device *dev)
1399 if (HAS_L3_DPF(dev)) 1452 if (HAS_L3_DPF(dev))
1400 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 1453 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1401 1454
1402 ring->init_hw = gen8_init_render_ring; 1455 if (INTEL_INFO(dev)->gen >= 9)
1456 ring->init_hw = gen9_init_render_ring;
1457 else
1458 ring->init_hw = gen8_init_render_ring;
1403 ring->init_context = gen8_init_rcs_context; 1459 ring->init_context = gen8_init_rcs_context;
1404 ring->cleanup = intel_fini_pipe_control; 1460 ring->cleanup = intel_fini_pipe_control;
1405 ring->get_seqno = gen8_get_seqno; 1461 ring->get_seqno = gen8_get_seqno;
@@ -1581,37 +1637,47 @@ cleanup_render_ring:
1581 return ret; 1637 return ret;
1582} 1638}
1583 1639
1584int intel_lr_context_render_state_init(struct intel_engine_cs *ring, 1640static u32
1585 struct intel_context *ctx) 1641make_rpcs(struct drm_device *dev)
1586{ 1642{
1587 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; 1643 u32 rpcs = 0;
1588 struct render_state so;
1589 struct drm_i915_file_private *file_priv = ctx->file_priv;
1590 struct drm_file *file = file_priv ? file_priv->file : NULL;
1591 int ret;
1592
1593 ret = i915_gem_render_state_prepare(ring, &so);
1594 if (ret)
1595 return ret;
1596 1644
1597 if (so.rodata == NULL) 1645 /*
1646 * No explicit RPCS request is needed to ensure full
1647 * slice/subslice/EU enablement prior to Gen9.
1648 */
1649 if (INTEL_INFO(dev)->gen < 9)
1598 return 0; 1650 return 0;
1599 1651
1600 ret = ring->emit_bb_start(ringbuf, 1652 /*
1601 ctx, 1653 * Starting in Gen9, render power gating can leave
1602 so.ggtt_offset, 1654 * slice/subslice/EU in a partially enabled state. We
1603 I915_DISPATCH_SECURE); 1655 * must make an explicit request through RPCS for full
1604 if (ret) 1656 * enablement.
1605 goto out; 1657 */
1658 if (INTEL_INFO(dev)->has_slice_pg) {
1659 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
1660 rpcs |= INTEL_INFO(dev)->slice_total <<
1661 GEN8_RPCS_S_CNT_SHIFT;
1662 rpcs |= GEN8_RPCS_ENABLE;
1663 }
1606 1664
1607 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); 1665 if (INTEL_INFO(dev)->has_subslice_pg) {
1666 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
1667 rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
1668 GEN8_RPCS_SS_CNT_SHIFT;
1669 rpcs |= GEN8_RPCS_ENABLE;
1670 }
1608 1671
1609 ret = __i915_add_request(ring, file, so.obj); 1672 if (INTEL_INFO(dev)->has_eu_pg) {
1610 /* intel_logical_ring_add_request moves object to inactive if it 1673 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
1611 * fails */ 1674 GEN8_RPCS_EU_MIN_SHIFT;
1612out: 1675 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
1613 i915_gem_render_state_fini(&so); 1676 GEN8_RPCS_EU_MAX_SHIFT;
1614 return ret; 1677 rpcs |= GEN8_RPCS_ENABLE;
1678 }
1679
1680 return rpcs;
1615} 1681}
1616 1682
1617static int 1683static int
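
make_rpcs() packs the runtime-probed slice, subslice and EU counts into
the R_PWR_CLK_STATE value, so a gen9 context explicitly requests full
slice/subslice/EU enablement even when render power gating has left the
hardware partially enabled; before gen9 the value stays 0 because full
enablement is implicit. A self-contained sketch of the same packing, with
assumed shift values in place of the GEN8_RPCS_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed field layout, mirroring the GEN8_RPCS_* usage above */
    #define RPCS_ENABLE        (1u << 31)
    #define RPCS_S_CNT_ENABLE  (1u << 18)
    #define RPCS_S_CNT_SHIFT   15
    #define RPCS_SS_CNT_ENABLE (1u << 11)
    #define RPCS_SS_CNT_SHIFT  8
    #define RPCS_EU_MIN_SHIFT  0
    #define RPCS_EU_MAX_SHIFT  4

    static uint32_t make_rpcs(int gen, int slice_pg, unsigned slices,
                              int subslice_pg, unsigned subslices,
                              int eu_pg, unsigned eus)
    {
        uint32_t rpcs = 0;

        if (gen < 9)            /* implicit full enablement before gen9 */
            return 0;

        if (slice_pg)
            rpcs |= RPCS_ENABLE | RPCS_S_CNT_ENABLE |
                    (slices << RPCS_S_CNT_SHIFT);
        if (subslice_pg)
            rpcs |= RPCS_ENABLE | RPCS_SS_CNT_ENABLE |
                    (subslices << RPCS_SS_CNT_SHIFT);
        if (eu_pg)              /* request min == max == full EU count */
            rpcs |= RPCS_ENABLE | (eus << RPCS_EU_MIN_SHIFT) |
                    (eus << RPCS_EU_MAX_SHIFT);
        return rpcs;
    }

    int main(void)
    {
        printf("%#x\n", make_rpcs(9, 1, 1, 1, 3, 1, 8));
        return 0;
    }
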
@@ -1659,7 +1725,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
1659 reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED; 1725 reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
1660 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring); 1726 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
1661 reg_state[CTX_CONTEXT_CONTROL+1] = 1727 reg_state[CTX_CONTEXT_CONTROL+1] =
1662 _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT); 1728 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
1729 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
1663 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base); 1730 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
1664 reg_state[CTX_RING_HEAD+1] = 0; 1731 reg_state[CTX_RING_HEAD+1] = 0;
1665 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); 1732 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1706,18 +1773,18 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
1706 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1); 1773 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
1707 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); 1774 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
1708 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); 1775 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
1709 reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]); 1776 reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3]->daddr);
1710 reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]); 1777 reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3]->daddr);
1711 reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]); 1778 reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2]->daddr);
1712 reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]); 1779 reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2]->daddr);
1713 reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]); 1780 reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1]->daddr);
1714 reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]); 1781 reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1]->daddr);
1715 reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]); 1782 reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0]->daddr);
1716 reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]); 1783 reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0]->daddr);
1717 if (ring->id == RCS) { 1784 if (ring->id == RCS) {
1718 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 1785 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
1719 reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8; 1786 reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
1720 reg_state[CTX_R_PWR_CLK_STATE+1] = 0; 1787 reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
1721 } 1788 }
1722 1789
1723 kunmap_atomic(reg_state); 1790 kunmap_atomic(reg_state);
@@ -1925,3 +1992,38 @@ error_unpin_ctx:
1925 drm_gem_object_unreference(&ctx_obj->base); 1992 drm_gem_object_unreference(&ctx_obj->base);
1926 return ret; 1993 return ret;
1927} 1994}
1995
1996void intel_lr_context_reset(struct drm_device *dev,
1997 struct intel_context *ctx)
1998{
1999 struct drm_i915_private *dev_priv = dev->dev_private;
2000 struct intel_engine_cs *ring;
2001 int i;
2002
2003 for_each_ring(ring, dev_priv, i) {
2004 struct drm_i915_gem_object *ctx_obj =
2005 ctx->engine[ring->id].state;
2006 struct intel_ringbuffer *ringbuf =
2007 ctx->engine[ring->id].ringbuf;
2008 uint32_t *reg_state;
2009 struct page *page;
2010
2011 if (!ctx_obj)
2012 continue;
2013
2014 if (i915_gem_object_get_pages(ctx_obj)) {
2015 WARN(1, "Failed get_pages for context obj\n");
2016 continue;
2017 }
2018 page = i915_gem_object_get_page(ctx_obj, 1);
2019 reg_state = kmap_atomic(page);
2020
2021 reg_state[CTX_RING_HEAD+1] = 0;
2022 reg_state[CTX_RING_TAIL+1] = 0;
2023
2024 kunmap_atomic(reg_state);
2025
2026 ringbuf->head = 0;
2027 ringbuf->tail = 0;
2028 }
2029}
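
The new intel_lr_context_reset() lets GPU reset sanitize every logical
ring context: for each engine it rewinds the HEAD/TAIL values saved in
the context image (page 1 of the context object) and zeroes the software
ringbuffer pointers, so a hung context does not replay stale commands
after the reset. A minimal sketch of the register-image fixup, with
assumed CTX_RING_HEAD/CTX_RING_TAIL offsets:

    #include <stdint.h>
    #include <stdio.h>

    #define CTX_RING_HEAD 4   /* assumed dword offsets into the image */
    #define CTX_RING_TAIL 6

    struct ringbuf { uint32_t head, tail; };

    static void lr_context_reset(uint32_t *reg_state, struct ringbuf *rb)
    {
        /* each entry is an (offset, value) pair; zero the values */
        reg_state[CTX_RING_HEAD + 1] = 0;
        reg_state[CTX_RING_TAIL + 1] = 0;
        rb->head = rb->tail = 0;
    }

    int main(void)
    {
        uint32_t reg_state[16] = { 0 };
        struct ringbuf rb = { 0x40, 0x80 };

        lr_context_reset(reg_state, &rb);
        printf("head=%u tail=%u\n", rb.head, rb.tail);
        return 0;
    }
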
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 6f2d7da594f6..adb731e49c57 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -30,6 +30,8 @@
30#define RING_ELSP(ring) ((ring)->mmio_base+0x230) 30#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
31#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234) 31#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
32#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) 32#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
33#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
34#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
33#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370) 35#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
34#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) 36#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
35 37
@@ -40,10 +42,6 @@ int intel_logical_rings_init(struct drm_device *dev);
40 42
41int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf, 43int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
42 struct intel_context *ctx); 44 struct intel_context *ctx);
43void intel_logical_ring_advance_and_submit(
44 struct intel_ringbuffer *ringbuf,
45 struct intel_context *ctx,
46 struct drm_i915_gem_request *request);
47/** 45/**
48 * intel_logical_ring_advance() - advance the ringbuffer tail 46 * intel_logical_ring_advance() - advance the ringbuffer tail
49 * @ringbuf: Ringbuffer to advance. 47 * @ringbuf: Ringbuffer to advance.
@@ -70,13 +68,13 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
70 int num_dwords); 68 int num_dwords);
71 69
72/* Logical Ring Contexts */ 70/* Logical Ring Contexts */
73int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
74 struct intel_context *ctx);
75void intel_lr_context_free(struct intel_context *ctx); 71void intel_lr_context_free(struct intel_context *ctx);
76int intel_lr_context_deferred_create(struct intel_context *ctx, 72int intel_lr_context_deferred_create(struct intel_context *ctx,
77 struct intel_engine_cs *ring); 73 struct intel_engine_cs *ring);
78void intel_lr_context_unpin(struct intel_engine_cs *ring, 74void intel_lr_context_unpin(struct intel_engine_cs *ring,
79 struct intel_context *ctx); 75 struct intel_context *ctx);
76void intel_lr_context_reset(struct drm_device *dev,
77 struct intel_context *ctx);
80 78
81/* Execlists */ 79/* Execlists */
82int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); 80int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -86,7 +84,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
86 struct drm_i915_gem_execbuffer2 *args, 84 struct drm_i915_gem_execbuffer2 *args,
87 struct list_head *vmas, 85 struct list_head *vmas,
88 struct drm_i915_gem_object *batch_obj, 86 struct drm_i915_gem_object *batch_obj,
89 u64 exec_start, u32 flags); 87 u64 exec_start, u32 dispatch_flags);
90u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj); 88u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
91 89
92void intel_lrc_irq_handler(struct intel_engine_cs *ring); 90void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 071b96d6e146..24e8730dc189 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -509,7 +509,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
509 intel_connector->panel.fitting_mode = value; 509 intel_connector->panel.fitting_mode = value;
510 510
511 crtc = intel_attached_encoder(connector)->base.crtc; 511 crtc = intel_attached_encoder(connector)->base.crtc;
512 if (crtc && crtc->enabled) { 512 if (crtc && crtc->state->enable) {
513 /* 513 /*
514 * If the CRTC is enabled, the display will be changed 514 * If the CRTC is enabled, the display will be changed
515 * according to the new panel fitting mode. 515 * according to the new panel fitting mode.
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d8de1d5140a7..71e87abdcae7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -744,10 +744,8 @@ void intel_opregion_init(struct drm_device *dev)
744 return; 744 return;
745 745
746 if (opregion->acpi) { 746 if (opregion->acpi) {
747 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 747 intel_didl_outputs(dev);
748 intel_didl_outputs(dev); 748 intel_setup_cadls(dev);
749 intel_setup_cadls(dev);
750 }
751 749
752 /* Notify BIOS we are ready to handle ACPI video ext notifs. 750 /* Notify BIOS we are ready to handle ACPI video ext notifs.
753 * Right now, all the events are handled by the ACPI video module. 751 * Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index f93dfc174495..823d1d97a000 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1065,7 +1065,6 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1065 struct put_image_params *params; 1065 struct put_image_params *params;
1066 int ret; 1066 int ret;
1067 1067
1068 /* No need to check for DRIVER_MODESET - we don't set it up then. */
1069 overlay = dev_priv->overlay; 1068 overlay = dev_priv->overlay;
1070 if (!overlay) { 1069 if (!overlay) {
1071 DRM_DEBUG("userspace bug: no overlay\n"); 1070 DRM_DEBUG("userspace bug: no overlay\n");
@@ -1261,7 +1260,6 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1261 struct overlay_registers __iomem *regs; 1260 struct overlay_registers __iomem *regs;
1262 int ret; 1261 int ret;
1263 1262
1264 /* No need to check for DRIVER_MODESET - we don't set it up then. */
1265 overlay = dev_priv->overlay; 1263 overlay = dev_priv->overlay;
1266 if (!overlay) { 1264 if (!overlay) {
1267 DRM_DEBUG("userspace bug: no overlay\n"); 1265 DRM_DEBUG("userspace bug: no overlay\n");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 24d77ddcc5f4..542cf6844dc3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -56,24 +56,42 @@ static void gen9_init_clock_gating(struct drm_device *dev)
56{ 56{
57 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
58 58
59 /* 59 /* WaEnableLbsSlaRetryTimerDecrement:skl */
60 * WaDisableSDEUnitClockGating:skl 60 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
61 * This seems to be a pre-production w/a. 61 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
62 */ 62}
63 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
64 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
65 63
66 /* 64static void skl_init_clock_gating(struct drm_device *dev)
67 * WaDisableDgMirrorFixInHalfSliceChicken5:skl 65{
68 * This is a pre-production w/a. 66 struct drm_i915_private *dev_priv = dev->dev_private;
69 */
70 I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
71 I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
72 ~GEN9_DG_MIRROR_FIX_ENABLE);
73 67
74 /* Wa4x4STCOptimizationDisable:skl */ 68 gen9_init_clock_gating(dev);
75 I915_WRITE(CACHE_MODE_1, 69
76 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); 70 if (INTEL_REVID(dev) == SKL_REVID_A0) {
71 /*
72 * WaDisableSDEUnitClockGating:skl
73 * WaSetGAPSunitClckGateDisable:skl
74 */
75 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
76 GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
77 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
78 }
79
80 if (INTEL_REVID(dev) <= SKL_REVID_D0) {
81 /* WaDisableHDCInvalidation:skl */
82 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
83 BDW_DISABLE_HDC_INVALIDATION);
84
85 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
86 I915_WRITE(FF_SLICE_CS_CHICKEN2,
87 I915_READ(FF_SLICE_CS_CHICKEN2) |
88 GEN9_TSG_BARRIER_ACK_DISABLE);
89 }
90
91 if (INTEL_REVID(dev) <= SKL_REVID_E0)
92 /* WaDisableLSQCROPERFforOCL:skl */
93 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
94 GEN8_LQSC_RO_PERF_DIS);
77} 95}
78 96
79static void i915_pineview_get_mem_freq(struct drm_device *dev) 97static void i915_pineview_get_mem_freq(struct drm_device *dev)
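
gen9_init_clock_gating() is trimmed to the one workaround that applies to
all gen9 parts (WaEnableLbsSlaRetryTimerDecrement); the pre-production
items move out, and a new skl_init_clock_gating() layers the Skylake
stepping-bound workarounds on top: the SDE/GAPS unit clock gating
disables only on A0, HDC invalidation and the TSG barrier ack chicken bit
up to D0, and the LSQC RO perf fix up to E0. A compact sketch of that
stepping-gated pattern, with hypothetical revision constants:

    #include <stdio.h>

    enum { REVID_A0, REVID_B0, REVID_C0, REVID_D0, REVID_E0 }; /* assumed */

    static void skl_apply_workarounds(int revid)
    {
        if (revid == REVID_A0)
            printf("WaDisableSDEUnitClockGating + WaSetGAPSunitClckGateDisable\n");
        if (revid <= REVID_D0)
            printf("WaDisableHDCInvalidation + TSG barrier ack disable\n");
        if (revid <= REVID_E0)
            printf("WaDisableLSQCROPERFforOCL\n");
    }

    int main(void)
    {
        skl_apply_workarounds(REVID_B0);
        return 0;
    }
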
@@ -1711,6 +1729,8 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
1711 GEN9_MEM_LATENCY_LEVEL_MASK; 1729 GEN9_MEM_LATENCY_LEVEL_MASK;
1712 1730
1713 /* 1731 /*
1732 * WaWmMemoryReadLatency:skl
1733 *
1714 * punit doesn't take into account the read latency so we need 1734 * punit doesn't take into account the read latency so we need
1715 * to add 2us to the various latency levels we retrieve from 1735 * to add 2us to the various latency levels we retrieve from
1716 * the punit. 1736 * the punit.
@@ -2502,6 +2522,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2502 enum pipe pipe = intel_crtc->pipe; 2522 enum pipe pipe = intel_crtc->pipe;
2503 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 2523 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
2504 uint16_t alloc_size, start, cursor_blocks; 2524 uint16_t alloc_size, start, cursor_blocks;
2525 uint16_t minimum[I915_MAX_PLANES];
2505 unsigned int total_data_rate; 2526 unsigned int total_data_rate;
2506 int plane; 2527 int plane;
2507 2528
@@ -2520,9 +2541,21 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2520 alloc_size -= cursor_blocks; 2541 alloc_size -= cursor_blocks;
2521 alloc->end -= cursor_blocks; 2542 alloc->end -= cursor_blocks;
2522 2543
 2544 /* 1. Allocate the minimum required blocks for each active plane */
2545 for_each_plane(pipe, plane) {
2546 const struct intel_plane_wm_parameters *p;
2547
2548 p = &params->plane[plane];
2549 if (!p->enabled)
2550 continue;
2551
2552 minimum[plane] = 8;
2553 alloc_size -= minimum[plane];
2554 }
2555
2523 /* 2556 /*
2524 * Each active plane get a portion of the remaining space, in 2557 * 2. Distribute the remaining space in proportion to the amount of
2525 * proportion to the amount of data they need to fetch from memory. 2558 * data each plane needs to fetch from memory.
2526 * 2559 *
2527 * FIXME: we may not allocate every single block here. 2560 * FIXME: we may not allocate every single block here.
2528 */ 2561 */
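
The DDB allocator now works in two passes: reserve a fixed minimum of 8
blocks for every active plane, then hand out whatever remains in
proportion to each plane's memory data rate. A self-contained sketch of
that arithmetic (plane count and rates are made up, and the real code
carves out the cursor blocks first):

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_BLOCKS 8

    static void allocate_ddb(uint16_t alloc_size, const uint32_t *rate,
                             const int *enabled, int n, uint16_t *blocks)
    {
        uint64_t total_rate = 0;
        int i;

        /* 1. reserve the minimum for each active plane */
        for (i = 0; i < n; i++) {
            blocks[i] = 0;
            if (!enabled[i])
                continue;
            blocks[i] = MIN_BLOCKS;
            alloc_size -= MIN_BLOCKS;
            total_rate += rate[i];
        }

        /* 2. split the remainder in proportion to the data rates,
         * promoted to 64 bits as in the code above */
        for (i = 0; i < n; i++)
            if (enabled[i])
                blocks[i] += (uint16_t)((uint64_t)alloc_size * rate[i] /
                                        total_rate);
    }

    int main(void)
    {
        uint32_t rate[3] = { 300, 100, 0 };
        int enabled[3] = { 1, 1, 0 };
        uint16_t blocks[3];

        allocate_ddb(160, rate, enabled, 3, blocks);
        printf("%u %u %u\n", blocks[0], blocks[1], blocks[2]); /* 116 44 0 */
        return 0;
    }
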
@@ -2544,8 +2577,9 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2544 * promote the expression to 64 bits to avoid overflowing, the 2577 * promote the expression to 64 bits to avoid overflowing, the
2545 * result is < available as data_rate / total_data_rate < 1 2578 * result is < available as data_rate / total_data_rate < 1
2546 */ 2579 */
2547 plane_blocks = div_u64((uint64_t)alloc_size * data_rate, 2580 plane_blocks = minimum[plane];
2548 total_data_rate); 2581 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
2582 total_data_rate);
2549 2583
2550 ddb->plane[pipe][plane].start = start; 2584 ddb->plane[pipe][plane].start = start;
2551 ddb->plane[pipe][plane].end = start + plane_blocks; 2585 ddb->plane[pipe][plane].end = start + plane_blocks;
@@ -2575,7 +2609,7 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2575 if (latency == 0) 2609 if (latency == 0)
2576 return UINT_MAX; 2610 return UINT_MAX;
2577 2611
2578 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel; 2612 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
2579 ret = DIV_ROUND_UP(wm_intermediate_val, 1000); 2613 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
2580 2614
2581 return ret; 2615 return ret;
@@ -2583,17 +2617,29 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2583 2617
2584static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, 2618static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2585 uint32_t horiz_pixels, uint8_t bytes_per_pixel, 2619 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2586 uint32_t latency) 2620 uint64_t tiling, uint32_t latency)
2587{ 2621{
2588 uint32_t ret, plane_bytes_per_line, wm_intermediate_val; 2622 uint32_t ret;
2623 uint32_t plane_bytes_per_line, plane_blocks_per_line;
2624 uint32_t wm_intermediate_val;
2589 2625
2590 if (latency == 0) 2626 if (latency == 0)
2591 return UINT_MAX; 2627 return UINT_MAX;
2592 2628
2593 plane_bytes_per_line = horiz_pixels * bytes_per_pixel; 2629 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
2630
2631 if (tiling == I915_FORMAT_MOD_Y_TILED ||
2632 tiling == I915_FORMAT_MOD_Yf_TILED) {
2633 plane_bytes_per_line *= 4;
2634 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2635 plane_blocks_per_line /= 4;
2636 } else {
2637 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2638 }
2639
2594 wm_intermediate_val = latency * pixel_rate; 2640 wm_intermediate_val = latency * pixel_rate;
2595 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * 2641 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
2596 plane_bytes_per_line; 2642 plane_blocks_per_line;
2597 2643
2598 return ret; 2644 return ret;
2599} 2645}
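
skl_wm_method1() now divides by 512 so both methods work in blocks, and
skl_wm_method2() rounds Y-tiled surfaces to whole 4-line tile rows: bytes
per line are multiplied by 4 before the 512-byte block round-up and the
block count divided by 4 again. A standalone sketch of the method-2
arithmetic under those assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static uint32_t wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
                               uint32_t width, uint8_t bpp, int y_tiled,
                               uint32_t latency_us)
    {
        uint32_t bytes_per_line = width * bpp;
        uint32_t blocks_per_line;

        if (y_tiled)    /* round to whole 4-line Y-tile rows */
            blocks_per_line = DIV_ROUND_UP(bytes_per_line * 4, 512) / 4;
        else
            blocks_per_line = DIV_ROUND_UP(bytes_per_line, 512);

        /* lines covered by the latency window, times blocks per line */
        return DIV_ROUND_UP(latency_us * pixel_rate, pipe_htotal * 1000) *
               blocks_per_line;
    }

    int main(void)
    {
        printf("%u\n", wm_method2(148500, 2200, 1920, 4, 1, 15));
        return 0;
    }
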
@@ -2642,6 +2688,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2642 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2688 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2643 enum pipe pipe = intel_crtc->pipe; 2689 enum pipe pipe = intel_crtc->pipe;
2644 struct drm_plane *plane; 2690 struct drm_plane *plane;
2691 struct drm_framebuffer *fb;
2645 int i = 1; /* Index for sprite planes start */ 2692 int i = 1; /* Index for sprite planes start */
2646 2693
2647 p->active = intel_crtc_active(crtc); 2694 p->active = intel_crtc_active(crtc);
@@ -2657,6 +2704,14 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2657 crtc->primary->fb->bits_per_pixel / 8; 2704 crtc->primary->fb->bits_per_pixel / 8;
2658 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; 2705 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
2659 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; 2706 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
2707 p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
2708 fb = crtc->primary->state->fb;
2709 /*
2710 * Framebuffer can be NULL on plane disable, but it does not
2711 * matter for watermarks if we assume no tiling in that case.
2712 */
2713 if (fb)
2714 p->plane[0].tiling = fb->modifier[0];
2660 2715
2661 p->cursor.enabled = true; 2716 p->cursor.enabled = true;
2662 p->cursor.bytes_per_pixel = 4; 2717 p->cursor.bytes_per_pixel = 4;
@@ -2673,41 +2728,60 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2673 } 2728 }
2674} 2729}
2675 2730
2676static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p, 2731static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
2732 struct skl_pipe_wm_parameters *p,
2677 struct intel_plane_wm_parameters *p_params, 2733 struct intel_plane_wm_parameters *p_params,
2678 uint16_t ddb_allocation, 2734 uint16_t ddb_allocation,
2679 uint32_t mem_value, 2735 int level,
2680 uint16_t *out_blocks, /* out */ 2736 uint16_t *out_blocks, /* out */
2681 uint8_t *out_lines /* out */) 2737 uint8_t *out_lines /* out */)
2682{ 2738{
2683 uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines; 2739 uint32_t latency = dev_priv->wm.skl_latency[level];
2684 uint32_t result_bytes; 2740 uint32_t method1, method2;
2741 uint32_t plane_bytes_per_line, plane_blocks_per_line;
2742 uint32_t res_blocks, res_lines;
2743 uint32_t selected_result;
2685 2744
2686 if (mem_value == 0 || !p->active || !p_params->enabled) 2745 if (latency == 0 || !p->active || !p_params->enabled)
2687 return false; 2746 return false;
2688 2747
2689 method1 = skl_wm_method1(p->pixel_rate, 2748 method1 = skl_wm_method1(p->pixel_rate,
2690 p_params->bytes_per_pixel, 2749 p_params->bytes_per_pixel,
2691 mem_value); 2750 latency);
2692 method2 = skl_wm_method2(p->pixel_rate, 2751 method2 = skl_wm_method2(p->pixel_rate,
2693 p->pipe_htotal, 2752 p->pipe_htotal,
2694 p_params->horiz_pixels, 2753 p_params->horiz_pixels,
2695 p_params->bytes_per_pixel, 2754 p_params->bytes_per_pixel,
2696 mem_value); 2755 p_params->tiling,
2756 latency);
2697 2757
2698 plane_bytes_per_line = p_params->horiz_pixels * 2758 plane_bytes_per_line = p_params->horiz_pixels *
2699 p_params->bytes_per_pixel; 2759 p_params->bytes_per_pixel;
2760 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2700 2761
2701 /* For now xtile and linear */ 2762 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2702 if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1) 2763 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
2703 result_bytes = min(method1, method2); 2764 uint32_t y_tile_minimum = plane_blocks_per_line * 4;
2704 else 2765 selected_result = max(method2, y_tile_minimum);
2705 result_bytes = method1; 2766 } else {
2767 if ((ddb_allocation / plane_blocks_per_line) >= 1)
2768 selected_result = min(method1, method2);
2769 else
2770 selected_result = method1;
2771 }
2706 2772
2707 res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1; 2773 res_blocks = selected_result + 1;
2708 res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line); 2774 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
2775
2776 if (level >= 1 && level <= 7) {
2777 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2778 p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
2779 res_lines += 4;
2780 else
2781 res_blocks++;
2782 }
2709 2783
2710 if (res_blocks > ddb_allocation || res_lines > 31) 2784 if (res_blocks >= ddb_allocation || res_lines > 31)
2711 return false; 2785 return false;
2712 2786
2713 *out_blocks = res_blocks; 2787 *out_blocks = res_blocks;
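
skl_compute_plane_wm() now receives the watermark level and looks up the
latency itself, and the result selection depends on tiling: Y-tiled
surfaces take max(method2, one Y-tile row of blocks), others keep
min(method1, method2) when at least one full line fits in the DDB
allocation. Levels 1-7 then pad the result by four lines (Y-tiled) or one
extra block. A small sketch of just the selection step, assuming the
inputs were computed as in the sketch above:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t select_wm(uint32_t method1, uint32_t method2,
                              uint32_t blocks_per_line, uint16_t ddb_blocks,
                              int y_tiled)
    {
        if (y_tiled) {
            uint32_t y_tile_minimum = blocks_per_line * 4;
            return method2 > y_tile_minimum ? method2 : y_tile_minimum;
        }
        /* does at least one full line fit in the allocation? */
        if (ddb_blocks / blocks_per_line >= 1)
            return method1 < method2 ? method1 : method2;
        return method1;
    }

    int main(void)
    {
        printf("%u\n", select_wm(10, 30, 15, 100, 1)); /* -> 60 */
        return 0;
    }
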
@@ -2724,23 +2798,24 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
2724 int num_planes, 2798 int num_planes,
2725 struct skl_wm_level *result) 2799 struct skl_wm_level *result)
2726{ 2800{
2727 uint16_t latency = dev_priv->wm.skl_latency[level];
2728 uint16_t ddb_blocks; 2801 uint16_t ddb_blocks;
2729 int i; 2802 int i;
2730 2803
2731 for (i = 0; i < num_planes; i++) { 2804 for (i = 0; i < num_planes; i++) {
2732 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 2805 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
2733 2806
2734 result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i], 2807 result->plane_en[i] = skl_compute_plane_wm(dev_priv,
2808 p, &p->plane[i],
2735 ddb_blocks, 2809 ddb_blocks,
2736 latency, 2810 level,
2737 &result->plane_res_b[i], 2811 &result->plane_res_b[i],
2738 &result->plane_res_l[i]); 2812 &result->plane_res_l[i]);
2739 } 2813 }
2740 2814
2741 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]); 2815 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
2742 result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks, 2816 result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
2743 latency, &result->cursor_res_b, 2817 ddb_blocks, level,
2818 &result->cursor_res_b,
2744 &result->cursor_res_l); 2819 &result->cursor_res_l);
2745} 2820}
2746 2821
@@ -3133,12 +3208,20 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3133 int pixel_size, bool enabled, bool scaled) 3208 int pixel_size, bool enabled, bool scaled)
3134{ 3209{
3135 struct intel_plane *intel_plane = to_intel_plane(plane); 3210 struct intel_plane *intel_plane = to_intel_plane(plane);
3211 struct drm_framebuffer *fb = plane->state->fb;
3136 3212
3137 intel_plane->wm.enabled = enabled; 3213 intel_plane->wm.enabled = enabled;
3138 intel_plane->wm.scaled = scaled; 3214 intel_plane->wm.scaled = scaled;
3139 intel_plane->wm.horiz_pixels = sprite_width; 3215 intel_plane->wm.horiz_pixels = sprite_width;
3140 intel_plane->wm.vert_pixels = sprite_height; 3216 intel_plane->wm.vert_pixels = sprite_height;
3141 intel_plane->wm.bytes_per_pixel = pixel_size; 3217 intel_plane->wm.bytes_per_pixel = pixel_size;
3218 intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
3219 /*
3220 * Framebuffer can be NULL on plane disable, but it does not
3221 * matter for watermarks if we assume no tiling in that case.
3222 */
3223 if (fb)
3224 intel_plane->wm.tiling = fb->modifier[0];
3142 3225
3143 skl_update_wm(crtc); 3226 skl_update_wm(crtc);
3144} 3227}
@@ -3750,7 +3833,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3750/* gen6_set_rps is called to update the frequency request, but should also be 3833/* gen6_set_rps is called to update the frequency request, but should also be
3751 * called when the range (min_delay and max_delay) is modified so that we can 3834 * called when the range (min_delay and max_delay) is modified so that we can
3752 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 3835 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3753void gen6_set_rps(struct drm_device *dev, u8 val) 3836static void gen6_set_rps(struct drm_device *dev, u8 val)
3754{ 3837{
3755 struct drm_i915_private *dev_priv = dev->dev_private; 3838 struct drm_i915_private *dev_priv = dev->dev_private;
3756 3839
@@ -3786,6 +3869,27 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3786 trace_intel_gpu_freq_change(val * 50); 3869 trace_intel_gpu_freq_change(val * 50);
3787} 3870}
3788 3871
3872static void valleyview_set_rps(struct drm_device *dev, u8 val)
3873{
3874 struct drm_i915_private *dev_priv = dev->dev_private;
3875
3876 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3877 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3878 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3879
3880 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3881 "Odd GPU freq value\n"))
3882 val &= ~1;
3883
3884 if (val != dev_priv->rps.cur_freq)
3885 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3886
3887 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3888
3889 dev_priv->rps.cur_freq = val;
3890 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
3891}
3892
3789/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down 3893/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3790 * 3894 *
3791 * * If Gfx is Idle, then 3895 * * If Gfx is Idle, then
@@ -3850,38 +3954,20 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
3850 3954
3851void gen6_rps_boost(struct drm_i915_private *dev_priv) 3955void gen6_rps_boost(struct drm_i915_private *dev_priv)
3852{ 3956{
3853 struct drm_device *dev = dev_priv->dev;
3854
3855 mutex_lock(&dev_priv->rps.hw_lock); 3957 mutex_lock(&dev_priv->rps.hw_lock);
3856 if (dev_priv->rps.enabled) { 3958 if (dev_priv->rps.enabled) {
3857 if (IS_VALLEYVIEW(dev)) 3959 intel_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3858 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3859 else
3860 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3861 dev_priv->rps.last_adj = 0; 3960 dev_priv->rps.last_adj = 0;
3862 } 3961 }
3863 mutex_unlock(&dev_priv->rps.hw_lock); 3962 mutex_unlock(&dev_priv->rps.hw_lock);
3864} 3963}
3865 3964
3866void valleyview_set_rps(struct drm_device *dev, u8 val) 3965void intel_set_rps(struct drm_device *dev, u8 val)
3867{ 3966{
3868 struct drm_i915_private *dev_priv = dev->dev_private; 3967 if (IS_VALLEYVIEW(dev))
3869 3968 valleyview_set_rps(dev, val);
3870 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3969 else
3871 WARN_ON(val > dev_priv->rps.max_freq_softlimit); 3970 gen6_set_rps(dev, val);
3872 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3873
3874 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3875 "Odd GPU freq value\n"))
3876 val &= ~1;
3877
3878 if (val != dev_priv->rps.cur_freq)
3879 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3880
3881 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3882
3883 dev_priv->rps.cur_freq = val;
3884 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
3885} 3971}
3886 3972
3887static void gen9_disable_rps(struct drm_device *dev) 3973static void gen9_disable_rps(struct drm_device *dev)
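
gen6_set_rps() and the relocated valleyview_set_rps() both become static;
callers now go through a single intel_set_rps() that dispatches on the
platform, which lets gen6_rps_boost() drop its own IS_VALLEYVIEW()
branch. A trivial sketch of the wrapper pattern, with stand-in types:

    #include <stdio.h>

    struct drm_device { int is_valleyview; };

    static void gen6_set_rps(struct drm_device *dev, unsigned char val)
    {
        printf("gen6 freq request: %u\n", val);
    }

    static void valleyview_set_rps(struct drm_device *dev, unsigned char val)
    {
        printf("vlv punit freq request: %u\n", val);
    }

    /* single entry point; the platform split stays private */
    static void intel_set_rps(struct drm_device *dev, unsigned char val)
    {
        if (dev->is_valleyview)
            valleyview_set_rps(dev, val);
        else
            gen6_set_rps(dev, val);
    }

    int main(void)
    {
        struct drm_device dev = { .is_valleyview = 1 };
        intel_set_rps(&dev, 0x30);
        return 0;
    }
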
@@ -5633,6 +5719,10 @@ void intel_enable_gt_powersave(struct drm_device *dev)
5633{ 5719{
5634 struct drm_i915_private *dev_priv = dev->dev_private; 5720 struct drm_i915_private *dev_priv = dev->dev_private;
5635 5721
5722 /* Powersaving is controlled by the host when inside a VM */
5723 if (intel_vgpu_active(dev))
5724 return;
5725
5636 if (IS_IRONLAKE_M(dev)) { 5726 if (IS_IRONLAKE_M(dev)) {
5637 mutex_lock(&dev->struct_mutex); 5727 mutex_lock(&dev->struct_mutex);
5638 ironlake_enable_drps(dev); 5728 ironlake_enable_drps(dev);
@@ -6396,7 +6486,8 @@ void intel_init_clock_gating(struct drm_device *dev)
6396{ 6486{
6397 struct drm_i915_private *dev_priv = dev->dev_private; 6487 struct drm_i915_private *dev_priv = dev->dev_private;
6398 6488
6399 dev_priv->display.init_clock_gating(dev); 6489 if (dev_priv->display.init_clock_gating)
6490 dev_priv->display.init_clock_gating(dev);
6400} 6491}
6401 6492
6402void intel_suspend_hw(struct drm_device *dev) 6493void intel_suspend_hw(struct drm_device *dev)
@@ -6422,7 +6513,7 @@ void intel_init_pm(struct drm_device *dev)
6422 if (INTEL_INFO(dev)->gen >= 9) { 6513 if (INTEL_INFO(dev)->gen >= 9) {
6423 skl_setup_wm_latency(dev); 6514 skl_setup_wm_latency(dev);
6424 6515
6425 dev_priv->display.init_clock_gating = gen9_init_clock_gating; 6516 dev_priv->display.init_clock_gating = skl_init_clock_gating;
6426 dev_priv->display.update_wm = skl_update_wm; 6517 dev_priv->display.update_wm = skl_update_wm;
6427 dev_priv->display.update_sprite_wm = skl_update_sprite_wm; 6518 dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
6428 } else if (HAS_PCH_SPLIT(dev)) { 6519 } else if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b3c6dbd467..cd79c3843452 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -502,6 +502,68 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
502 I915_WRITE(HWS_PGA, addr); 502 I915_WRITE(HWS_PGA, addr);
503} 503}
504 504
505static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
506{
507 struct drm_device *dev = ring->dev;
508 struct drm_i915_private *dev_priv = ring->dev->dev_private;
509 u32 mmio = 0;
510
511 /* The ring status page addresses are no longer next to the rest of
512 * the ring registers as of gen7.
513 */
514 if (IS_GEN7(dev)) {
515 switch (ring->id) {
516 case RCS:
517 mmio = RENDER_HWS_PGA_GEN7;
518 break;
519 case BCS:
520 mmio = BLT_HWS_PGA_GEN7;
521 break;
522 /*
523 * VCS2 actually doesn't exist on Gen7. Only shut up
524 * gcc switch check warning
525 */
526 case VCS2:
527 case VCS:
528 mmio = BSD_HWS_PGA_GEN7;
529 break;
530 case VECS:
531 mmio = VEBOX_HWS_PGA_GEN7;
532 break;
533 }
534 } else if (IS_GEN6(ring->dev)) {
535 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
536 } else {
537 /* XXX: gen8 returns to sanity */
538 mmio = RING_HWS_PGA(ring->mmio_base);
539 }
540
541 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
542 POSTING_READ(mmio);
543
544 /*
545 * Flush the TLB for this page
546 *
547 * FIXME: These two bits have disappeared on gen8, so a question
548 * arises: do we still need this and if so how should we go about
549 * invalidating the TLB?
550 */
551 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
552 u32 reg = RING_INSTPM(ring->mmio_base);
553
 554		/* ring should be idle before issuing a sync flush */
555 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
556
557 I915_WRITE(reg,
558 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
559 INSTPM_SYNC_FLUSH));
560 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
561 1000))
562 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
563 ring->name);
564 }
565}
566
505static bool stop_ring(struct intel_engine_cs *ring) 567static bool stop_ring(struct intel_engine_cs *ring)
506{ 568{
507 struct drm_i915_private *dev_priv = to_i915(ring->dev); 569 struct drm_i915_private *dev_priv = to_i915(ring->dev);
@@ -788,12 +850,14 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
 788	 * workaround for a possible hang in the unlikely event a TLB	 850
789 * invalidation occurs during a PSD flush. 851 * invalidation occurs during a PSD flush.
790 */ 852 */
791 /* WaForceEnableNonCoherent:bdw */
792 /* WaHdcDisableFetchWhenMasked:bdw */
793 /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
794 WA_SET_BIT_MASKED(HDC_CHICKEN0, 853 WA_SET_BIT_MASKED(HDC_CHICKEN0,
854 /* WaForceEnableNonCoherent:bdw */
795 HDC_FORCE_NON_COHERENT | 855 HDC_FORCE_NON_COHERENT |
856 /* WaForceContextSaveRestoreNonCoherent:bdw */
857 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
858 /* WaHdcDisableFetchWhenMasked:bdw */
796 HDC_DONOT_FETCH_MEM_WHEN_MASKED | 859 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
860 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
797 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); 861 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
798 862
799 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0: 863 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
@@ -870,9 +934,132 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
870 GEN6_WIZ_HASHING_MASK, 934 GEN6_WIZ_HASHING_MASK,
871 GEN6_WIZ_HASHING_16x4); 935 GEN6_WIZ_HASHING_16x4);
872 936
937 if (INTEL_REVID(dev) == SKL_REVID_C0 ||
938 INTEL_REVID(dev) == SKL_REVID_D0)
939 /* WaBarrierPerformanceFixDisable:skl */
940 WA_SET_BIT_MASKED(HDC_CHICKEN0,
941 HDC_FENCE_DEST_SLM_DISABLE |
942 HDC_BARRIER_PERFORMANCE_DISABLE);
943
944 return 0;
945}
946
947static int gen9_init_workarounds(struct intel_engine_cs *ring)
948{
949 struct drm_device *dev = ring->dev;
950 struct drm_i915_private *dev_priv = dev->dev_private;
951
952 /* WaDisablePartialInstShootdown:skl */
953 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
954 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
955
956 /* Syncing dependencies between camera and graphics */
957 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
958 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
959
960 if (INTEL_REVID(dev) == SKL_REVID_A0 ||
961 INTEL_REVID(dev) == SKL_REVID_B0) {
962 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
963 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
964 GEN9_DG_MIRROR_FIX_ENABLE);
965 }
966
967 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
968 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl */
969 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
970 GEN9_RHWO_OPTIMIZATION_DISABLE);
971 WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
972 DISABLE_PIXEL_MASK_CAMMING);
973 }
974
975 if (INTEL_REVID(dev) >= SKL_REVID_C0) {
976 /* WaEnableYV12BugFixInHalfSliceChicken7:skl */
977 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
978 GEN9_ENABLE_YV12_BUGFIX);
979 }
980
981 if (INTEL_REVID(dev) <= SKL_REVID_D0) {
982 /*
 983		 * Use Force Non-Coherent whenever executing a 3D context. This
984 * is a workaround for a possible hang in the unlikely event
985 * a TLB invalidation occurs during a PSD flush.
986 */
987 /* WaForceEnableNonCoherent:skl */
988 WA_SET_BIT_MASKED(HDC_CHICKEN0,
989 HDC_FORCE_NON_COHERENT);
990 }
991
992 /* Wa4x4STCOptimizationDisable:skl */
993 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
994
995 /* WaDisablePartialResolveInVc:skl */
996 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
997
998 /* WaCcsTlbPrefetchDisable:skl */
999 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
1000 GEN9_CCS_TLB_PREFETCH_ENABLE);
1001
1002 return 0;
1003}
1004
1005static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
1006{
1007 struct drm_device *dev = ring->dev;
1008 struct drm_i915_private *dev_priv = dev->dev_private;
1009 u8 vals[3] = { 0, 0, 0 };
1010 unsigned int i;
1011
1012 for (i = 0; i < 3; i++) {
1013 u8 ss;
1014
1015 /*
1016 * Only consider slices where one, and only one, subslice has 7
1017 * EUs
1018 */
1019 if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
1020 continue;
1021
1022 /*
1023 * subslice_7eu[i] != 0 (because of the check above) and
1024 * ss_max == 4 (maximum number of subslices possible per slice)
1025 *
1026 * -> 0 <= ss <= 3;
1027 */
1028 ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
1029 vals[i] = 3 - ss;
1030 }
1031
1032 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
1033 return 0;
1034
1035 /* Tune IZ hashing. See intel_device_info_runtime_init() */
1036 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
1037 GEN9_IZ_HASHING_MASK(2) |
1038 GEN9_IZ_HASHING_MASK(1) |
1039 GEN9_IZ_HASHING_MASK(0),
1040 GEN9_IZ_HASHING(2, vals[2]) |
1041 GEN9_IZ_HASHING(1, vals[1]) |
1042 GEN9_IZ_HASHING(0, vals[0]));
1043
873 return 0; 1044 return 0;
874} 1045}
875 1046
1047
1048static int skl_init_workarounds(struct intel_engine_cs *ring)
1049{
1050 struct drm_device *dev = ring->dev;
1051 struct drm_i915_private *dev_priv = dev->dev_private;
1052
1053 gen9_init_workarounds(ring);
1054
1055 /* WaDisablePowerCompilerClockGating:skl */
1056 if (INTEL_REVID(dev) == SKL_REVID_B0)
1057 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1058 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1059
1060 return skl_tune_iz_hashing(ring);
1061}
1062
876int init_workarounds_ring(struct intel_engine_cs *ring) 1063int init_workarounds_ring(struct intel_engine_cs *ring)
877{ 1064{
878 struct drm_device *dev = ring->dev; 1065 struct drm_device *dev = ring->dev;
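
skl_tune_iz_hashing() biases IZ hashing away from slices that are short
on execution units: for every slice whose fuse mask shows exactly one
subslice with 7 EUs, it programs a 3 - ss value into the GEN7_GT_MODE
hashing field for that slice. A standalone re-creation of the selection,
using GCC builtins in place of the kernel's hweight8()/ffs():

    #include <stdint.h>
    #include <stdio.h>

    static void tune_iz_hashing(const uint8_t subslice_7eu[3],
                                uint8_t vals[3])
    {
        for (int i = 0; i < 3; i++) {
            vals[i] = 0;
            /* only slices with one, and only one, 7-EU subslice count */
            if (__builtin_popcount(subslice_7eu[i]) != 1)
                continue;
            int ss = __builtin_ffs(subslice_7eu[i]) - 1;  /* 0..3 */
            vals[i] = 3 - ss;
        }
    }

    int main(void)
    {
        uint8_t mask[3] = { 0x4, 0x3, 0x0 }, vals[3];

        tune_iz_hashing(mask, vals);
        printf("%u %u %u\n", vals[0], vals[1], vals[2]); /* 1 0 0 */
        return 0;
    }
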
@@ -888,6 +1075,11 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
888 if (IS_CHERRYVIEW(dev)) 1075 if (IS_CHERRYVIEW(dev))
889 return chv_init_workarounds(ring); 1076 return chv_init_workarounds(ring);
890 1077
1078 if (IS_SKYLAKE(dev))
1079 return skl_init_workarounds(ring);
1080 else if (IS_GEN9(dev))
1081 return gen9_init_workarounds(ring);
1082
891 return 0; 1083 return 0;
892} 1084}
893 1085
@@ -1386,68 +1578,6 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
1386 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1578 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1387} 1579}
1388 1580
1389void intel_ring_setup_status_page(struct intel_engine_cs *ring)
1390{
1391 struct drm_device *dev = ring->dev;
1392 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1393 u32 mmio = 0;
1394
1395 /* The ring status page addresses are no longer next to the rest of
1396 * the ring registers as of gen7.
1397 */
1398 if (IS_GEN7(dev)) {
1399 switch (ring->id) {
1400 case RCS:
1401 mmio = RENDER_HWS_PGA_GEN7;
1402 break;
1403 case BCS:
1404 mmio = BLT_HWS_PGA_GEN7;
1405 break;
1406 /*
1407 * VCS2 actually doesn't exist on Gen7. Only shut up
1408 * gcc switch check warning
1409 */
1410 case VCS2:
1411 case VCS:
1412 mmio = BSD_HWS_PGA_GEN7;
1413 break;
1414 case VECS:
1415 mmio = VEBOX_HWS_PGA_GEN7;
1416 break;
1417 }
1418 } else if (IS_GEN6(ring->dev)) {
1419 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
1420 } else {
1421 /* XXX: gen8 returns to sanity */
1422 mmio = RING_HWS_PGA(ring->mmio_base);
1423 }
1424
1425 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
1426 POSTING_READ(mmio);
1427
1428 /*
1429 * Flush the TLB for this page
1430 *
1431 * FIXME: These two bits have disappeared on gen8, so a question
1432 * arises: do we still need this and if so how should we go about
1433 * invalidating the TLB?
1434 */
1435 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
1436 u32 reg = RING_INSTPM(ring->mmio_base);
1437
1438 /* ring should be idle before issuing a sync flush*/
1439 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1440
1441 I915_WRITE(reg,
1442 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
1443 INSTPM_SYNC_FLUSH));
1444 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1445 1000))
1446 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
1447 ring->name);
1448 }
1449}
1450
1451static int 1581static int
1452bsd_ring_flush(struct intel_engine_cs *ring, 1582bsd_ring_flush(struct intel_engine_cs *ring,
1453 u32 invalidate_domains, 1583 u32 invalidate_domains,
@@ -1611,7 +1741,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
1611static int 1741static int
1612i965_dispatch_execbuffer(struct intel_engine_cs *ring, 1742i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1613 u64 offset, u32 length, 1743 u64 offset, u32 length,
1614 unsigned flags) 1744 unsigned dispatch_flags)
1615{ 1745{
1616 int ret; 1746 int ret;
1617 1747
@@ -1622,7 +1752,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1622 intel_ring_emit(ring, 1752 intel_ring_emit(ring,
1623 MI_BATCH_BUFFER_START | 1753 MI_BATCH_BUFFER_START |
1624 MI_BATCH_GTT | 1754 MI_BATCH_GTT |
1625 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); 1755 (dispatch_flags & I915_DISPATCH_SECURE ?
1756 0 : MI_BATCH_NON_SECURE_I965));
1626 intel_ring_emit(ring, offset); 1757 intel_ring_emit(ring, offset);
1627 intel_ring_advance(ring); 1758 intel_ring_advance(ring);
1628 1759
@@ -1635,8 +1766,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1635#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 1766#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1636static int 1767static int
1637i830_dispatch_execbuffer(struct intel_engine_cs *ring, 1768i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1638 u64 offset, u32 len, 1769 u64 offset, u32 len,
1639 unsigned flags) 1770 unsigned dispatch_flags)
1640{ 1771{
1641 u32 cs_offset = ring->scratch.gtt_offset; 1772 u32 cs_offset = ring->scratch.gtt_offset;
1642 int ret; 1773 int ret;
@@ -1654,7 +1785,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1654 intel_ring_emit(ring, MI_NOOP); 1785 intel_ring_emit(ring, MI_NOOP);
1655 intel_ring_advance(ring); 1786 intel_ring_advance(ring);
1656 1787
1657 if ((flags & I915_DISPATCH_PINNED) == 0) { 1788 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1658 if (len > I830_BATCH_LIMIT) 1789 if (len > I830_BATCH_LIMIT)
1659 return -ENOSPC; 1790 return -ENOSPC;
1660 1791
@@ -1686,7 +1817,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1686 return ret; 1817 return ret;
1687 1818
1688 intel_ring_emit(ring, MI_BATCH_BUFFER); 1819 intel_ring_emit(ring, MI_BATCH_BUFFER);
1689 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 1820 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1821 0 : MI_BATCH_NON_SECURE));
1690 intel_ring_emit(ring, offset + len - 8); 1822 intel_ring_emit(ring, offset + len - 8);
1691 intel_ring_emit(ring, MI_NOOP); 1823 intel_ring_emit(ring, MI_NOOP);
1692 intel_ring_advance(ring); 1824 intel_ring_advance(ring);
@@ -1697,7 +1829,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1697static int 1829static int
1698i915_dispatch_execbuffer(struct intel_engine_cs *ring, 1830i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1699 u64 offset, u32 len, 1831 u64 offset, u32 len,
1700 unsigned flags) 1832 unsigned dispatch_flags)
1701{ 1833{
1702 int ret; 1834 int ret;
1703 1835
@@ -1706,7 +1838,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1706 return ret; 1838 return ret;
1707 1839
1708 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 1840 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1709 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 1841 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1842 0 : MI_BATCH_NON_SECURE));
1710 intel_ring_advance(ring); 1843 intel_ring_advance(ring);
1711 1844
1712 return 0; 1845 return 0;
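[Editor's note] Across these hunks the parameter is renamed flags -> dispatch_flags without changing its meaning: the SECURE bit decides whether the batch-start command carries the non-secure bit. A compact sketch of that mapping; the opcode and bit values below are illustrative stand-ins, not guaranteed to match the hardware encoding:

#include <stdio.h>
#include <stdint.h>

#define DISPATCH_SECURE        0x1   /* mirrors I915_DISPATCH_SECURE */
#define DISPATCH_PINNED        0x2   /* mirrors I915_DISPATCH_PINNED */

#define MI_BATCH_BUFFER_START  (0x31u << 23)  /* illustrative opcode */
#define MI_BATCH_NON_SECURE    (1u << 0)      /* illustrative bit */

static uint32_t batch_start_cmd(unsigned dispatch_flags)
{
	uint32_t cmd = MI_BATCH_BUFFER_START;

	/* A non-secure batch must carry the NON_SECURE bit. */
	if (!(dispatch_flags & DISPATCH_SECURE))
		cmd |= MI_BATCH_NON_SECURE;
	return cmd;
}

int main(void)
{
	printf("secure:     %#x\n", batch_start_cmd(DISPATCH_SECURE));
	printf("non-secure: %#x\n", batch_start_cmd(0));
	return 0;
}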
@@ -2097,6 +2230,7 @@ intel_ring_alloc_request(struct intel_engine_cs *ring)
2097 2230
2098 kref_init(&request->ref); 2231 kref_init(&request->ref);
2099 request->ring = ring; 2232 request->ring = ring;
2233 request->ringbuf = ring->buffer;
2100 request->uniq = dev_private->request_uniq++; 2234 request->uniq = dev_private->request_uniq++;
2101 2235
2102 ret = i915_gem_get_seqno(ring->dev, &request->seqno); 2236 ret = i915_gem_get_seqno(ring->dev, &request->seqno);
@@ -2273,9 +2407,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
2273static int 2407static int
2274gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 2408gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2275 u64 offset, u32 len, 2409 u64 offset, u32 len,
2276 unsigned flags) 2410 unsigned dispatch_flags)
2277{ 2411{
2278 bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE); 2412 bool ppgtt = USES_PPGTT(ring->dev) &&
2413 !(dispatch_flags & I915_DISPATCH_SECURE);
2279 int ret; 2414 int ret;
2280 2415
2281 ret = intel_ring_begin(ring, 4); 2416 ret = intel_ring_begin(ring, 4);
@@ -2294,8 +2429,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2294 2429
2295static int 2430static int
2296hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 2431hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2297 u64 offset, u32 len, 2432 u64 offset, u32 len,
2298 unsigned flags) 2433 unsigned dispatch_flags)
2299{ 2434{
2300 int ret; 2435 int ret;
2301 2436
@@ -2305,7 +2440,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2305 2440
2306 intel_ring_emit(ring, 2441 intel_ring_emit(ring,
2307 MI_BATCH_BUFFER_START | 2442 MI_BATCH_BUFFER_START |
2308 (flags & I915_DISPATCH_SECURE ? 2443 (dispatch_flags & I915_DISPATCH_SECURE ?
2309 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW)); 2444 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
2310 /* bit0-7 is the length on GEN6+ */ 2445 /* bit0-7 is the length on GEN6+ */
2311 intel_ring_emit(ring, offset); 2446 intel_ring_emit(ring, offset);
@@ -2317,7 +2452,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2317static int 2452static int
2318gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 2453gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2319 u64 offset, u32 len, 2454 u64 offset, u32 len,
2320 unsigned flags) 2455 unsigned dispatch_flags)
2321{ 2456{
2322 int ret; 2457 int ret;
2323 2458
@@ -2327,7 +2462,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2327 2462
2328 intel_ring_emit(ring, 2463 intel_ring_emit(ring,
2329 MI_BATCH_BUFFER_START | 2464 MI_BATCH_BUFFER_START |
2330 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); 2465 (dispatch_flags & I915_DISPATCH_SECURE ?
2466 0 : MI_BATCH_NON_SECURE_I965));
2331 /* bit0-7 is the length on GEN6+ */ 2467 /* bit0-7 is the length on GEN6+ */
2332 intel_ring_emit(ring, offset); 2468 intel_ring_emit(ring, offset);
2333 intel_ring_advance(ring); 2469 intel_ring_advance(ring);
@@ -2612,19 +2748,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2612} 2748}
2613 2749
2614/** 2750/**
2615 * Initialize the second BSD ring for Broadwell GT3. 2751 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
2616 * It is noted that this only exists on Broadwell GT3.
2617 */ 2752 */
2618int intel_init_bsd2_ring_buffer(struct drm_device *dev) 2753int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2619{ 2754{
2620 struct drm_i915_private *dev_priv = dev->dev_private; 2755 struct drm_i915_private *dev_priv = dev->dev_private;
2621 struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; 2756 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2622 2757
2623 if ((INTEL_INFO(dev)->gen != 8)) {
2624 DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
2625 return -EINVAL;
2626 }
2627
2628 ring->name = "bsd2 ring"; 2758 ring->name = "bsd2 ring";
2629 ring->id = VCS2; 2759 ring->id = VCS2;
2630 2760
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 714f3fdd57d2..8f3b49a23ccf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -164,7 +164,7 @@ struct intel_engine_cs {
164 u32 seqno); 164 u32 seqno);
165 int (*dispatch_execbuffer)(struct intel_engine_cs *ring, 165 int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
166 u64 offset, u32 length, 166 u64 offset, u32 length,
167 unsigned flags); 167 unsigned dispatch_flags);
168#define I915_DISPATCH_SECURE 0x1 168#define I915_DISPATCH_SECURE 0x1
169#define I915_DISPATCH_PINNED 0x2 169#define I915_DISPATCH_PINNED 0x2
170 void (*cleanup)(struct intel_engine_cs *ring); 170 void (*cleanup)(struct intel_engine_cs *ring);
@@ -242,7 +242,7 @@ struct intel_engine_cs {
242 u32 flush_domains); 242 u32 flush_domains);
243 int (*emit_bb_start)(struct intel_ringbuffer *ringbuf, 243 int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
244 struct intel_context *ctx, 244 struct intel_context *ctx,
245 u64 offset, unsigned flags); 245 u64 offset, unsigned dispatch_flags);
246 246
247 /** 247 /**
248 * List of objects currently involved in rendering from the 248 * List of objects currently involved in rendering from the
@@ -373,11 +373,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
373 * 0x06: ring 2 head pointer (915-class) 373 * 0x06: ring 2 head pointer (915-class)
374 * 0x10-0x1b: Context status DWords (GM45) 374 * 0x10-0x1b: Context status DWords (GM45)
375 * 0x1f: Last written status offset. (GM45) 375 * 0x1f: Last written status offset. (GM45)
376 * 0x20-0x2f: Reserved (Gen6+)
376 * 377 *
377 * The area from dword 0x20 to 0x3ff is available for driver usage. 378 * The area from dword 0x30 to 0x3ff is available for driver usage.
378 */ 379 */
379#define I915_GEM_HWS_INDEX 0x20 380#define I915_GEM_HWS_INDEX 0x30
380#define I915_GEM_HWS_SCRATCH_INDEX 0x30 381#define I915_GEM_HWS_SCRATCH_INDEX 0x40
381#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 382#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
382 383
383void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 384void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
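[Editor's note] The hunk above reserves hardware status page dwords 0x20-0x2f and shifts the driver-visible indices up accordingly. A small sketch of the index-to-byte-offset arithmetic; the shift of 2 (4 bytes per dword) is assumed here to match MI_STORE_DWORD_INDEX_SHIFT's intent:

#include <stdio.h>

#define HWS_INDEX          0x30  /* was 0x20 before dwords 0x20-0x2f were reserved */
#define HWS_SCRATCH_INDEX  0x40  /* moved up to stay clear of HWS_INDEX */
#define DWORD_INDEX_SHIFT  2     /* dword index -> byte offset (assumed) */

int main(void)
{
	printf("seqno dword at byte offset %#x\n",
	       HWS_INDEX << DWORD_INDEX_SHIFT);
	printf("scratch dword at byte offset %#x\n",
	       HWS_SCRATCH_INDEX << DWORD_INDEX_SHIFT);
	return 0;
}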
@@ -425,7 +426,6 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
425int intel_init_vebox_ring_buffer(struct drm_device *dev); 426int intel_init_vebox_ring_buffer(struct drm_device *dev);
426 427
427u64 intel_ring_get_active_head(struct intel_engine_cs *ring); 428u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
428void intel_ring_setup_status_page(struct intel_engine_cs *ring);
429 429
430int init_workarounds_ring(struct intel_engine_cs *ring); 430int init_workarounds_ring(struct intel_engine_cs *ring);
431 431
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 49695d7d51e3..6d8e29abbc33 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -230,6 +230,136 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
230 } 230 }
231} 231}
232 232
233#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
234 BIT(POWER_DOMAIN_TRANSCODER_A) | \
235 BIT(POWER_DOMAIN_PIPE_B) | \
236 BIT(POWER_DOMAIN_TRANSCODER_B) | \
237 BIT(POWER_DOMAIN_PIPE_C) | \
238 BIT(POWER_DOMAIN_TRANSCODER_C) | \
239 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
240 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
241 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
242 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
243 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
244 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
245 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
246 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
247 BIT(POWER_DOMAIN_AUX_B) | \
248 BIT(POWER_DOMAIN_AUX_C) | \
249 BIT(POWER_DOMAIN_AUX_D) | \
250 BIT(POWER_DOMAIN_AUDIO) | \
251 BIT(POWER_DOMAIN_VGA) | \
252 BIT(POWER_DOMAIN_INIT))
253#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
254 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
255 BIT(POWER_DOMAIN_PLLS) | \
256 BIT(POWER_DOMAIN_PIPE_A) | \
257 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
258 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
259 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
260 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
261 BIT(POWER_DOMAIN_AUX_A) | \
262 BIT(POWER_DOMAIN_INIT))
263#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
264 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
265 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
266 BIT(POWER_DOMAIN_INIT))
267#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
268 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
269 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
270 BIT(POWER_DOMAIN_INIT))
271#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
272 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
273 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
274 BIT(POWER_DOMAIN_INIT))
275#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
276 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
277 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
278 BIT(POWER_DOMAIN_INIT))
279#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
280 SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
281#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
282 (POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
283 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
284 SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \
285 SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
286 SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
287 SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
288 SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
289 BIT(POWER_DOMAIN_INIT))
290
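[Editor's note] SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS above is built by complementing the union of all explicitly managed wells and forcing the INIT domain back in. A toy model of that derivation with invented domain bits:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)         (1u << (n))
#define DOMAIN_PIPE_A  0
#define DOMAIN_PIPE_B  1
#define DOMAIN_VGA     2
#define DOMAIN_INIT    3
#define DOMAIN_MASK    (BIT(4) - 1)   /* four domains in this toy model */

int main(void)
{
	uint32_t pw1 = BIT(DOMAIN_PIPE_A) | BIT(DOMAIN_INIT);
	uint32_t pw2 = BIT(DOMAIN_PIPE_B) | BIT(DOMAIN_VGA) | BIT(DOMAIN_INIT);

	/* Complement of all explicit wells, with INIT forced back on. */
	uint32_t always_on = (DOMAIN_MASK & ~(pw1 | pw2)) | BIT(DOMAIN_INIT);

	printf("always-on mask: %#x\n", always_on);
	return 0;
}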
291static void skl_set_power_well(struct drm_i915_private *dev_priv,
292 struct i915_power_well *power_well, bool enable)
293{
294 uint32_t tmp, fuse_status;
295 uint32_t req_mask, state_mask;
296 bool check_fuse_status = false;
297
298 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
299 fuse_status = I915_READ(SKL_FUSE_STATUS);
300
301 switch (power_well->data) {
302 case SKL_DISP_PW_1:
303 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
304 SKL_FUSE_PG0_DIST_STATUS), 1)) {
305 DRM_ERROR("PG0 not enabled\n");
306 return;
307 }
308 break;
309 case SKL_DISP_PW_2:
310 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
311 DRM_ERROR("PG1 in disabled state\n");
312 return;
313 }
314 break;
315 case SKL_DISP_PW_DDI_A_E:
316 case SKL_DISP_PW_DDI_B:
317 case SKL_DISP_PW_DDI_C:
318 case SKL_DISP_PW_DDI_D:
319 case SKL_DISP_PW_MISC_IO:
320 break;
321 default:
322 WARN(1, "Unknown power well %lu\n", power_well->data);
323 return;
324 }
325
326 req_mask = SKL_POWER_WELL_REQ(power_well->data);
327 state_mask = SKL_POWER_WELL_STATE(power_well->data);
328
329 if (enable) {
330 if (!(tmp & req_mask)) {
331 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
332 DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
333 }
334
335 if (!(tmp & state_mask)) {
336 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
337 state_mask), 1))
338 DRM_ERROR("%s enable timeout\n",
339 power_well->name);
340 check_fuse_status = true;
341 }
342 } else {
343 if (tmp & req_mask) {
344 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
345 POSTING_READ(HSW_PWR_WELL_DRIVER);
346 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
347 }
348 }
349
350 if (check_fuse_status) {
351 if (power_well->data == SKL_DISP_PW_1) {
352 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
353 SKL_FUSE_PG1_DIST_STATUS), 1))
354 DRM_ERROR("PG1 distributing status timeout\n");
355 } else if (power_well->data == SKL_DISP_PW_2) {
356 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
357 SKL_FUSE_PG2_DIST_STATUS), 1))
358 DRM_ERROR("PG2 distributing status timeout\n");
359 }
360 }
361}
362
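[Editor's note] skl_set_power_well() follows a request/acknowledge protocol: set the well's REQ bit in HSW_PWR_WELL_DRIVER, then poll its STATE bit. A userspace sketch with the register and hardware response stubbed; the per-well REQ/STATE bit pairing mirrors the SKL_POWER_WELL_REQ/STATE pattern but is modeled here, not taken from documentation:

#include <stdio.h>
#include <stdint.h>

static uint32_t pwr_well_reg;            /* fake HSW_PWR_WELL_DRIVER */

#define REQ_BIT(well)    (1u << ((well) * 2 + 1))
#define STATE_BIT(well)  (1u << ((well) * 2))

static uint32_t read_reg(void)
{
	/* Model hardware for well 1 only: a requested well turns on. */
	if (pwr_well_reg & REQ_BIT(1))
		pwr_well_reg |= STATE_BIT(1);
	return pwr_well_reg;
}

static int enable_well(int well)
{
	pwr_well_reg |= REQ_BIT(well);       /* raise the request */

	for (int tries = 0; tries < 1000; tries++)
		if (read_reg() & STATE_BIT(well))
			return 0;            /* hardware confirmed */
	return -1;                           /* enable timeout */
}

int main(void)
{
	printf("power well 1: %s\n", enable_well(1) ? "timeout" : "enabled");
	return 0;
}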
233static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, 363static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
234 struct i915_power_well *power_well) 364 struct i915_power_well *power_well)
235{ 365{
@@ -255,6 +385,36 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
255 hsw_set_power_well(dev_priv, power_well, false); 385 hsw_set_power_well(dev_priv, power_well, false);
256} 386}
257 387
388static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
389 struct i915_power_well *power_well)
390{
391 uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
392 SKL_POWER_WELL_STATE(power_well->data);
393
394 return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
395}
396
397static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
398 struct i915_power_well *power_well)
399{
400 skl_set_power_well(dev_priv, power_well, power_well->count > 0);
401
402 /* Clear any request made by BIOS as driver is taking over */
403 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
404}
405
406static void skl_power_well_enable(struct drm_i915_private *dev_priv,
407 struct i915_power_well *power_well)
408{
409 skl_set_power_well(dev_priv, power_well, true);
410}
411
412static void skl_power_well_disable(struct drm_i915_private *dev_priv,
413 struct i915_power_well *power_well)
414{
415 skl_set_power_well(dev_priv, power_well, false);
416}
417
258static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 418static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
259 struct i915_power_well *power_well) 419 struct i915_power_well *power_well)
260{ 420{
@@ -829,6 +989,13 @@ static const struct i915_power_well_ops hsw_power_well_ops = {
829 .is_enabled = hsw_power_well_enabled, 989 .is_enabled = hsw_power_well_enabled,
830}; 990};
831 991
992static const struct i915_power_well_ops skl_power_well_ops = {
993 .sync_hw = skl_power_well_sync_hw,
994 .enable = skl_power_well_enable,
995 .disable = skl_power_well_disable,
996 .is_enabled = skl_power_well_enabled,
997};
998
832static struct i915_power_well hsw_power_wells[] = { 999static struct i915_power_well hsw_power_wells[] = {
833 { 1000 {
834 .name = "always-on", 1001 .name = "always-on",
@@ -1059,6 +1226,57 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
1059 return NULL; 1226 return NULL;
1060} 1227}
1061 1228
1229static struct i915_power_well skl_power_wells[] = {
1230 {
1231 .name = "always-on",
1232 .always_on = 1,
1233 .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1234 .ops = &i9xx_always_on_power_well_ops,
1235 },
1236 {
1237 .name = "power well 1",
1238 .domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1239 .ops = &skl_power_well_ops,
1240 .data = SKL_DISP_PW_1,
1241 },
1242 {
1243 .name = "MISC IO power well",
1244 .domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
1245 .ops = &skl_power_well_ops,
1246 .data = SKL_DISP_PW_MISC_IO,
1247 },
1248 {
1249 .name = "power well 2",
1250 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1251 .ops = &skl_power_well_ops,
1252 .data = SKL_DISP_PW_2,
1253 },
1254 {
1255 .name = "DDI A/E power well",
1256 .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
1257 .ops = &skl_power_well_ops,
1258 .data = SKL_DISP_PW_DDI_A_E,
1259 },
1260 {
1261 .name = "DDI B power well",
1262 .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
1263 .ops = &skl_power_well_ops,
1264 .data = SKL_DISP_PW_DDI_B,
1265 },
1266 {
1267 .name = "DDI C power well",
1268 .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
1269 .ops = &skl_power_well_ops,
1270 .data = SKL_DISP_PW_DDI_C,
1271 },
1272 {
1273 .name = "DDI D power well",
1274 .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
1275 .ops = &skl_power_well_ops,
1276 .data = SKL_DISP_PW_DDI_D,
1277 },
1278};
1279
1062#define set_power_wells(power_domains, __power_wells) ({ \ 1280#define set_power_wells(power_domains, __power_wells) ({ \
1063 (power_domains)->power_wells = (__power_wells); \ 1281 (power_domains)->power_wells = (__power_wells); \
1064 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ 1282 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -1085,6 +1303,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
1085 set_power_wells(power_domains, hsw_power_wells); 1303 set_power_wells(power_domains, hsw_power_wells);
1086 } else if (IS_BROADWELL(dev_priv->dev)) { 1304 } else if (IS_BROADWELL(dev_priv->dev)) {
1087 set_power_wells(power_domains, bdw_power_wells); 1305 set_power_wells(power_domains, bdw_power_wells);
1306 } else if (IS_SKYLAKE(dev_priv->dev)) {
1307 set_power_wells(power_domains, skl_power_wells);
1088 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1308 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1089 set_power_wells(power_domains, chv_power_wells); 1309 set_power_wells(power_domains, chv_power_wells);
1090 } else if (IS_VALLEYVIEW(dev_priv->dev)) { 1310 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0a52c44ad03d..7051da7015d3 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -98,7 +98,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
98 if (min <= 0 || max <= 0) 98 if (min <= 0 || max <= 0)
99 return false; 99 return false;
100 100
101 if (WARN_ON(drm_vblank_get(dev, pipe))) 101 if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
102 return false; 102 return false;
103 103
104 local_irq_disable(); 104 local_irq_disable();
@@ -132,7 +132,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
132 132
133 finish_wait(wq, &wait); 133 finish_wait(wq, &wait);
134 134
135 drm_vblank_put(dev, pipe); 135 drm_crtc_vblank_put(&crtc->base);
136 136
137 *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe); 137 *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
138 138
@@ -189,7 +189,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
189 struct intel_plane *intel_plane = to_intel_plane(drm_plane); 189 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
190 const int pipe = intel_plane->pipe; 190 const int pipe = intel_plane->pipe;
191 const int plane = intel_plane->plane + 1; 191 const int plane = intel_plane->plane + 1;
192 u32 plane_ctl, stride; 192 u32 plane_ctl, stride_div;
193 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 193 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
194 194
195 plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); 195 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
@@ -245,17 +245,22 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
245 BUG(); 245 BUG();
246 } 246 }
247 247
248 switch (obj->tiling_mode) { 248 switch (fb->modifier[0]) {
249 case I915_TILING_NONE: 249 case DRM_FORMAT_MOD_NONE:
250 stride = fb->pitches[0] >> 6;
251 break; 250 break;
252 case I915_TILING_X: 251 case I915_FORMAT_MOD_X_TILED:
253 plane_ctl |= PLANE_CTL_TILED_X; 252 plane_ctl |= PLANE_CTL_TILED_X;
254 stride = fb->pitches[0] >> 9; 253 break;
254 case I915_FORMAT_MOD_Y_TILED:
255 plane_ctl |= PLANE_CTL_TILED_Y;
256 break;
257 case I915_FORMAT_MOD_Yf_TILED:
258 plane_ctl |= PLANE_CTL_TILED_YF;
255 break; 259 break;
256 default: 260 default:
257 BUG(); 261 MISSING_CASE(fb->modifier[0]);
258 } 262 }
263
259 if (drm_plane->state->rotation == BIT(DRM_ROTATE_180)) 264 if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
260 plane_ctl |= PLANE_CTL_ROTATE_180; 265 plane_ctl |= PLANE_CTL_ROTATE_180;
261 266
@@ -266,6 +271,9 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
266 pixel_size, true, 271 pixel_size, true,
267 src_w != crtc_w || src_h != crtc_h); 272 src_w != crtc_w || src_h != crtc_h);
268 273
274 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
275 fb->pixel_format);
276
269 /* Sizes are 0 based */ 277 /* Sizes are 0 based */
270 src_w--; 278 src_w--;
271 src_h--; 279 src_h--;
@@ -273,7 +281,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
273 crtc_h--; 281 crtc_h--;
274 282
275 I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); 283 I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
276 I915_WRITE(PLANE_STRIDE(pipe, plane), stride); 284 I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
277 I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x); 285 I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
278 I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w); 286 I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
279 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); 287 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
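[Editor's note] The stride write above changes from hardcoded shifts to fb->pitches[0] divided by a modifier-dependent unit returned by intel_fb_stride_alignment(). A sketch of that computation; the linear (64) and X-tile (512) divisors follow the shifts the old code used, while the Y-tile value is an assumption for illustration:

#include <stdio.h>

enum modifier { MOD_LINEAR, MOD_X_TILED, MOD_Y_TILED };

static unsigned stride_alignment(enum modifier mod)
{
	switch (mod) {
	case MOD_LINEAR:  return 64;   /* old code used pitches[0] >> 6 */
	case MOD_X_TILED: return 512;  /* old code used pitches[0] >> 9 */
	case MOD_Y_TILED: return 128;  /* assumed Y tile row width */
	}
	return 64;
}

int main(void)
{
	unsigned pitch = 4096;  /* framebuffer pitch in bytes */

	for (enum modifier m = MOD_LINEAR; m <= MOD_Y_TILED; m++)
		printf("modifier %d -> stride register %u\n",
		       (int)m, pitch / stride_alignment(m));
	return 0;
}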
@@ -993,7 +1001,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
993 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1001 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
994 1002
995 mutex_lock(&dev->struct_mutex); 1003 mutex_lock(&dev->struct_mutex);
996 if (dev_priv->fbc.plane == intel_crtc->plane) 1004 if (dev_priv->fbc.crtc == intel_crtc)
997 intel_fbc_disable(dev); 1005 intel_fbc_disable(dev);
998 mutex_unlock(&dev->struct_mutex); 1006 mutex_unlock(&dev->struct_mutex);
999 1007
@@ -1076,7 +1084,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
1076 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); 1084 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
1077 struct intel_plane *intel_plane = to_intel_plane(plane); 1085 struct intel_plane *intel_plane = to_intel_plane(plane);
1078 struct drm_framebuffer *fb = state->base.fb; 1086 struct drm_framebuffer *fb = state->base.fb;
1079 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1080 int crtc_x, crtc_y; 1087 int crtc_x, crtc_y;
1081 unsigned int crtc_w, crtc_h; 1088 unsigned int crtc_w, crtc_h;
1082 uint32_t src_x, src_y, src_w, src_h; 1089 uint32_t src_x, src_y, src_w, src_h;
@@ -1106,16 +1113,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
1106 return -EINVAL; 1113 return -EINVAL;
1107 } 1114 }
1108 1115
1109 /* Sprite planes can be linear or x-tiled surfaces */
1110 switch (obj->tiling_mode) {
1111 case I915_TILING_NONE:
1112 case I915_TILING_X:
1113 break;
1114 default:
1115 DRM_DEBUG_KMS("Unsupported tiling mode\n");
1116 return -EINVAL;
1117 }
1118
1119 /* 1116 /*
1120 * FIXME the following code does a bunch of fuzzy adjustments to the 1117 * FIXME the following code does a bunch of fuzzy adjustments to the
1121 * coordinates and sizes. We probably need some way to decide whether 1118 * coordinates and sizes. We probably need some way to decide whether
@@ -1259,6 +1256,12 @@ finish:
1259 1256
1260 if (!intel_crtc->primary_enabled && !state->hides_primary) 1257 if (!intel_crtc->primary_enabled && !state->hides_primary)
1261 intel_crtc->atomic.post_enable_primary = true; 1258 intel_crtc->atomic.post_enable_primary = true;
1259
1260 /* Update watermarks on tiling changes. */
1261 if (!plane->state->fb || !state->base.fb ||
1262 plane->state->fb->modifier[0] !=
1263 state->base.fb->modifier[0])
1264 intel_crtc->atomic.update_wm = true;
1262 } 1265 }
1263 1266
1264 return 0; 1267 return 0;
@@ -1312,9 +1315,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1312 struct intel_plane *intel_plane; 1315 struct intel_plane *intel_plane;
1313 int ret = 0; 1316 int ret = 0;
1314 1317
1315 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1316 return -ENODEV;
1317
1318 /* Make sure we don't try to enable both src & dest simultaneously */ 1318 /* Make sure we don't try to enable both src & dest simultaneously */
1319 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 1319 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
1320 return -EINVAL; 1320 return -EINVAL;
@@ -1343,9 +1343,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
1343 struct intel_plane *intel_plane; 1343 struct intel_plane *intel_plane;
1344 int ret = 0; 1344 int ret = 0;
1345 1345
1346 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1347 return -ENODEV;
1348
1349 drm_modeset_lock_all(dev); 1346 drm_modeset_lock_all(dev);
1350 1347
1351 plane = drm_plane_find(dev, get->plane_id); 1348 plane = drm_plane_find(dev, get->plane_id);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c47a3baa53d5..8879f17770aa 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -23,6 +23,7 @@
23 23
24#include "i915_drv.h" 24#include "i915_drv.h"
25#include "intel_drv.h" 25#include "intel_drv.h"
26#include "i915_vgpu.h"
26 27
27#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
28 29
@@ -210,6 +211,13 @@ static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
210 gen6_gt_check_fifodbg(dev_priv); 211 gen6_gt_check_fifodbg(dev_priv);
211} 212}
212 213
214static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
215{
216 u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
217
218 return count & GT_FIFO_FREE_ENTRIES_MASK;
219}
220
213static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 221static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
214{ 222{
215 int ret = 0; 223 int ret = 0;
@@ -217,16 +225,15 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
217 /* On VLV, FIFO will be shared by both SW and HW. 225 /* On VLV, FIFO will be shared by both SW and HW.
218 * So, we need to read the FREE_ENTRIES every time */ 226 * So, we need to read the FREE_ENTRIES every time */
219 if (IS_VALLEYVIEW(dev_priv->dev)) 227 if (IS_VALLEYVIEW(dev_priv->dev))
220 dev_priv->uncore.fifo_count = 228 dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
221 __raw_i915_read32(dev_priv, GTFIFOCTL) &
222 GT_FIFO_FREE_ENTRIES_MASK;
223 229
224 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { 230 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
225 int loop = 500; 231 int loop = 500;
226 u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; 232 u32 fifo = fifo_free_entries(dev_priv);
233
227 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { 234 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
228 udelay(10); 235 udelay(10);
229 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; 236 fifo = fifo_free_entries(dev_priv);
230 } 237 }
231 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) 238 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
232 ++ret; 239 ++ret;
@@ -314,8 +321,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
314 321
315 if (IS_GEN6(dev) || IS_GEN7(dev)) 322 if (IS_GEN6(dev) || IS_GEN7(dev))
316 dev_priv->uncore.fifo_count = 323 dev_priv->uncore.fifo_count =
317 __raw_i915_read32(dev_priv, GTFIFOCTL) & 324 fifo_free_entries(dev_priv);
318 GT_FIFO_FREE_ENTRIES_MASK;
319 } 325 }
320 326
321 if (!restore) 327 if (!restore)
@@ -328,8 +334,9 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
328{ 334{
329 struct drm_i915_private *dev_priv = dev->dev_private; 335 struct drm_i915_private *dev_priv = dev->dev_private;
330 336
331 if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && 337 if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
332 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) { 338 INTEL_INFO(dev)->gen >= 9) &&
339 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
333 /* The docs do not explain exactly how the calculation can be 340 /* The docs do not explain exactly how the calculation can be
334 * made. It is somewhat guessable, but for now, it's always 341 * made. It is somewhat guessable, but for now, it's always
335 * 128MB. 342 * 128MB.
@@ -640,6 +647,14 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
640 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 647 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
641} 648}
642 649
650#define __vgpu_read(x) \
651static u##x \
652vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
653 GEN6_READ_HEADER(x); \
654 val = __raw_i915_read##x(dev_priv, reg); \
655 GEN6_READ_FOOTER; \
656}
657
643#define __gen6_read(x) \ 658#define __gen6_read(x) \
644static u##x \ 659static u##x \
645gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 660gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
@@ -703,6 +718,10 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
703 GEN6_READ_FOOTER; \ 718 GEN6_READ_FOOTER; \
704} 719}
705 720
721__vgpu_read(8)
722__vgpu_read(16)
723__vgpu_read(32)
724__vgpu_read(64)
706__gen9_read(8) 725__gen9_read(8)
707__gen9_read(16) 726__gen9_read(16)
708__gen9_read(32) 727__gen9_read(32)
@@ -724,6 +743,7 @@ __gen6_read(64)
724#undef __chv_read 743#undef __chv_read
725#undef __vlv_read 744#undef __vlv_read
726#undef __gen6_read 745#undef __gen6_read
746#undef __vgpu_read
727#undef GEN6_READ_FOOTER 747#undef GEN6_READ_FOOTER
728#undef GEN6_READ_HEADER 748#undef GEN6_READ_HEADER
729 749
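[Editor's note] The __vgpu_read(x)/__vgpu_write(x) additions use the token-pasting pattern in which one macro stamps out an accessor per register width. A self-contained sketch of the same pattern over a fake MMIO array:

#include <stdio.h>
#include <stdint.h>

static uint64_t fake_mmio[16];

#define DEFINE_READ(x) \
static uint##x##_t read##x(unsigned reg) \
{ \
	/* A real driver would do a sized MMIO read here. */ \
	return (uint##x##_t)fake_mmio[reg]; \
}

DEFINE_READ(8)
DEFINE_READ(16)
DEFINE_READ(32)
DEFINE_READ(64)

int main(void)
{
	fake_mmio[3] = 0x1122334455667788ull;
	printf("read8:  %#x\n",   read8(3));
	printf("read16: %#x\n",   read16(3));
	printf("read32: %#x\n",   read32(3));
	printf("read64: %#llx\n", (unsigned long long)read64(3));
	return 0;
}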
@@ -807,6 +827,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
807 GEN6_WRITE_FOOTER; \ 827 GEN6_WRITE_FOOTER; \
808} 828}
809 829
830#define __vgpu_write(x) \
831static void vgpu_write##x(struct drm_i915_private *dev_priv, \
832 off_t reg, u##x val, bool trace) { \
833 GEN6_WRITE_HEADER; \
834 __raw_i915_write##x(dev_priv, reg, val); \
835 GEN6_WRITE_FOOTER; \
836}
837
810static const u32 gen8_shadowed_regs[] = { 838static const u32 gen8_shadowed_regs[] = {
811 FORCEWAKE_MT, 839 FORCEWAKE_MT,
812 GEN6_RPNSWREQ, 840 GEN6_RPNSWREQ,
@@ -924,12 +952,17 @@ __gen6_write(8)
924__gen6_write(16) 952__gen6_write(16)
925__gen6_write(32) 953__gen6_write(32)
926__gen6_write(64) 954__gen6_write(64)
955__vgpu_write(8)
956__vgpu_write(16)
957__vgpu_write(32)
958__vgpu_write(64)
927 959
928#undef __gen9_write 960#undef __gen9_write
929#undef __chv_write 961#undef __chv_write
930#undef __gen8_write 962#undef __gen8_write
931#undef __hsw_write 963#undef __hsw_write
932#undef __gen6_write 964#undef __gen6_write
965#undef __vgpu_write
933#undef GEN6_WRITE_FOOTER 966#undef GEN6_WRITE_FOOTER
934#undef GEN6_WRITE_HEADER 967#undef GEN6_WRITE_HEADER
935 968
@@ -972,6 +1005,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
972 d->val_set = FORCEWAKE_KERNEL; 1005 d->val_set = FORCEWAKE_KERNEL;
973 d->val_clear = 0; 1006 d->val_clear = 0;
974 } else { 1007 } else {
1008 /* WaRsClearFWBitsAtReset:bdw,skl */
975 d->val_reset = _MASKED_BIT_DISABLE(0xffff); 1009 d->val_reset = _MASKED_BIT_DISABLE(0xffff);
976 d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL); 1010 d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
977 d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); 1011 d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
@@ -1082,6 +1116,8 @@ void intel_uncore_init(struct drm_device *dev)
1082{ 1116{
1083 struct drm_i915_private *dev_priv = dev->dev_private; 1117 struct drm_i915_private *dev_priv = dev->dev_private;
1084 1118
1119 i915_check_vgpu(dev);
1120
1085 intel_uncore_ellc_detect(dev); 1121 intel_uncore_ellc_detect(dev);
1086 intel_uncore_fw_domains_init(dev); 1122 intel_uncore_fw_domains_init(dev);
1087 __intel_uncore_early_sanitize(dev, false); 1123 __intel_uncore_early_sanitize(dev, false);
@@ -1130,6 +1166,11 @@ void intel_uncore_init(struct drm_device *dev)
1130 break; 1166 break;
1131 } 1167 }
1132 1168
1169 if (intel_vgpu_active(dev)) {
1170 ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1171 ASSIGN_READ_MMIO_VFUNCS(vgpu);
1172 }
1173
1133 i915_check_and_clear_faults(dev); 1174 i915_check_and_clear_faults(dev);
1134} 1175}
1135#undef ASSIGN_WRITE_MMIO_VFUNCS 1176#undef ASSIGN_WRITE_MMIO_VFUNCS
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index cde25009203a..dbc068988377 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -83,7 +83,8 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
83}; 83};
84 84
85static int mdp4_plane_prepare_fb(struct drm_plane *plane, 85static int mdp4_plane_prepare_fb(struct drm_plane *plane,
86 struct drm_framebuffer *fb) 86 struct drm_framebuffer *fb,
87 const struct drm_plane_state *new_state)
87{ 88{
88 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 89 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
89 struct mdp4_kms *mdp4_kms = get_kms(plane); 90 struct mdp4_kms *mdp4_kms = get_kms(plane);
@@ -93,7 +94,8 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
93} 94}
94 95
95static void mdp4_plane_cleanup_fb(struct drm_plane *plane, 96static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
96 struct drm_framebuffer *fb) 97 struct drm_framebuffer *fb,
98 const struct drm_plane_state *old_state)
97{ 99{
98 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 100 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
99 struct mdp4_kms *mdp4_kms = get_kms(plane); 101 struct mdp4_kms *mdp4_kms = get_kms(plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 05cf9ab2a876..6bd48e246283 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -156,7 +156,8 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
156}; 156};
157 157
158static int mdp5_plane_prepare_fb(struct drm_plane *plane, 158static int mdp5_plane_prepare_fb(struct drm_plane *plane,
159 struct drm_framebuffer *fb) 159 struct drm_framebuffer *fb,
160 const struct drm_plane_state *new_state)
160{ 161{
161 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 162 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
162 struct mdp5_kms *mdp5_kms = get_kms(plane); 163 struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -166,7 +167,8 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
166} 167}
167 168
168static void mdp5_plane_cleanup_fb(struct drm_plane *plane, 169static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
169 struct drm_framebuffer *fb) 170 struct drm_framebuffer *fb,
171 const struct drm_plane_state *old_state)
170{ 172{
171 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 173 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
172 struct mdp5_kms *mdp5_kms = get_kms(plane); 174 struct mdp5_kms *mdp5_kms = get_kms(plane);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 18fd643b6e69..5b192128cda2 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -96,11 +96,11 @@ static void complete_commit(struct msm_commit *c)
96 96
97 kms->funcs->prepare_commit(kms, state); 97 kms->funcs->prepare_commit(kms, state);
98 98
99 drm_atomic_helper_commit_pre_planes(dev, state); 99 drm_atomic_helper_commit_modeset_disables(dev, state);
100 100
101 drm_atomic_helper_commit_planes(dev, state); 101 drm_atomic_helper_commit_planes(dev, state);
102 102
103 drm_atomic_helper_commit_post_planes(dev, state); 103 drm_atomic_helper_commit_modeset_enables(dev, state);
104 104
105 /* NOTE: _wait_for_vblanks() only waits for vblank on 105 /* NOTE: _wait_for_vblanks() only waits for vblank on
106 * enabled CRTCs. So we end up faulting when disabling 106 * enabled CRTCs. So we end up faulting when disabling
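[Editor's note] The msm change tracks the DRM helper renames; the commit sequence itself is unchanged: disable outgoing modesets, commit plane state, enable incoming modesets. A toy sequence with stub callbacks, shown only to make the ordering explicit:

#include <stdio.h>

static void modeset_disables(void) { printf("1. disable outgoing CRTCs/encoders\n"); }
static void commit_planes(void)    { printf("2. write new plane state\n"); }
static void modeset_enables(void)  { printf("3. enable incoming CRTCs/encoders\n"); }

int main(void)
{
	/* Same order as complete_commit() above. */
	modeset_disables();
	commit_planes();
	modeset_enables();
	return 0;
}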
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 25c7a998fc2c..9e72133bb64b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -15,6 +15,8 @@
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16 16
17#include <drm/drmP.h> 17#include <drm/drmP.h>
18#include <drm/drm_atomic.h>
19#include <drm/drm_atomic_helper.h>
18#include <drm/drm_crtc.h> 20#include <drm/drm_crtc.h>
19#include <drm/drm_crtc_helper.h> 21#include <drm/drm_crtc_helper.h>
20#include <drm/drm_fb_cma_helper.h> 22#include <drm/drm_fb_cma_helper.h>
@@ -99,9 +101,13 @@ static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
99 clk_disable_unprepare(rcrtc->clock); 101 clk_disable_unprepare(rcrtc->clock);
100} 102}
101 103
104/* -----------------------------------------------------------------------------
105 * Hardware Setup
106 */
107
102static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) 108static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
103{ 109{
104 const struct drm_display_mode *mode = &rcrtc->crtc.mode; 110 const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
105 unsigned long mode_clock = mode->clock * 1000; 111 unsigned long mode_clock = mode->clock * 1000;
106 unsigned long clk; 112 unsigned long clk;
107 u32 value; 113 u32 value;
@@ -187,9 +193,19 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc,
187 rcdu->dpad0_source = rcrtc->index; 193 rcdu->dpad0_source = rcrtc->index;
188} 194}
189 195
190void rcar_du_crtc_update_planes(struct drm_crtc *crtc) 196static unsigned int plane_zpos(struct rcar_du_plane *plane)
197{
198 return to_rcar_du_plane_state(plane->plane.state)->zpos;
199}
200
201static const struct rcar_du_format_info *
202plane_format(struct rcar_du_plane *plane)
203{
204 return to_rcar_du_plane_state(plane->plane.state)->format;
205}
206
207static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
191{ 208{
192 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
193 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; 209 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
194 unsigned int num_planes = 0; 210 unsigned int num_planes = 0;
195 unsigned int prio = 0; 211 unsigned int prio = 0;
@@ -201,29 +217,30 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
201 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i]; 217 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
202 unsigned int j; 218 unsigned int j;
203 219
204 if (plane->crtc != &rcrtc->crtc || !plane->enabled) 220 if (plane->plane.state->crtc != &rcrtc->crtc)
205 continue; 221 continue;
206 222
207 /* Insert the plane in the sorted planes array. */ 223 /* Insert the plane in the sorted planes array. */
208 for (j = num_planes++; j > 0; --j) { 224 for (j = num_planes++; j > 0; --j) {
209 if (planes[j-1]->zpos <= plane->zpos) 225 if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
210 break; 226 break;
211 planes[j] = planes[j-1]; 227 planes[j] = planes[j-1];
212 } 228 }
213 229
214 planes[j] = plane; 230 planes[j] = plane;
215 prio += plane->format->planes * 4; 231 prio += plane_format(plane)->planes * 4;
216 } 232 }
217 233
218 for (i = 0; i < num_planes; ++i) { 234 for (i = 0; i < num_planes; ++i) {
219 struct rcar_du_plane *plane = planes[i]; 235 struct rcar_du_plane *plane = planes[i];
220 unsigned int index = plane->hwindex; 236 struct drm_plane_state *state = plane->plane.state;
237 unsigned int index = to_rcar_du_plane_state(state)->hwindex;
221 238
222 prio -= 4; 239 prio -= 4;
223 dspr |= (index + 1) << prio; 240 dspr |= (index + 1) << prio;
224 dptsr |= DPTSR_PnDK(index) | DPTSR_PnTS(index); 241 dptsr |= DPTSR_PnDK(index) | DPTSR_PnTS(index);
225 242
226 if (plane->format->planes == 2) { 243 if (plane_format(plane)->planes == 2) {
227 index = (index + 1) % 8; 244 index = (index + 1) % 8;
228 245
229 prio -= 4; 246 prio -= 4;
@@ -236,8 +253,6 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
236 * with superposition controller 2. 253 * with superposition controller 2.
237 */ 254 */
238 if (rcrtc->index % 2) { 255 if (rcrtc->index % 2) {
239 u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
240
241 /* The DPTSR register is updated when the display controller is 256 /* The DPTSR register is updated when the display controller is
242 * stopped. We thus need to restart the DU. Once again, sorry 257 * stopped. We thus need to restart the DU. Once again, sorry
243 * for the flicker. One way to mitigate the issue would be to 258 * for the flicker. One way to mitigate the issue would be to
@@ -245,29 +260,104 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
245 * split, or through a module parameter). Flicker would then 260 * split, or through a module parameter). Flicker would then
246 * occur only if we need to break the pre-association. 261 * occur only if we need to break the pre-association.
247 */ 262 */
248 if (value != dptsr) { 263 mutex_lock(&rcrtc->group->lock);
264 if (rcar_du_group_read(rcrtc->group, DPTSR) != dptsr) {
249 rcar_du_group_write(rcrtc->group, DPTSR, dptsr); 265 rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
250 if (rcrtc->group->used_crtcs) 266 if (rcrtc->group->used_crtcs)
251 rcar_du_group_restart(rcrtc->group); 267 rcar_du_group_restart(rcrtc->group);
252 } 268 }
269 mutex_unlock(&rcrtc->group->lock);
253 } 270 }
254 271
255 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 272 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
256 dspr); 273 dspr);
257} 274}
258 275
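[Editor's note] rcar_du_crtc_update_planes() keeps its insertion sort, now keyed on the zpos stored in the atomic plane state. A standalone version of that sort over a small plane array:

#include <stdio.h>

struct plane { const char *name; unsigned zpos; };

int main(void)
{
	struct plane input[] = {
		{ "overlay", 2 }, { "primary", 0 }, { "cursor", 3 },
	};
	struct plane sorted[3];
	unsigned num = 0;

	for (unsigned i = 0; i < 3; i++) {
		unsigned j;

		/* Shift higher-zpos entries up to make room. */
		for (j = num++; j > 0; --j) {
			if (sorted[j - 1].zpos <= input[i].zpos)
				break;
			sorted[j] = sorted[j - 1];
		}
		sorted[j] = input[i];
	}

	for (unsigned i = 0; i < num; i++)
		printf("%u: %s (zpos %u)\n", i, sorted[i].name, sorted[i].zpos);
	return 0;
}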
276/* -----------------------------------------------------------------------------
277 * Page Flip
278 */
279
280void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
281 struct drm_file *file)
282{
283 struct drm_pending_vblank_event *event;
284 struct drm_device *dev = rcrtc->crtc.dev;
285 unsigned long flags;
286
287 /* Destroy the pending vertical blanking event associated with the
288 * pending page flip, if any, and disable vertical blanking interrupts.
289 */
290 spin_lock_irqsave(&dev->event_lock, flags);
291 event = rcrtc->event;
292 if (event && event->base.file_priv == file) {
293 rcrtc->event = NULL;
294 event->base.destroy(&event->base);
295 drm_crtc_vblank_put(&rcrtc->crtc);
296 }
297 spin_unlock_irqrestore(&dev->event_lock, flags);
298}
299
300static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
301{
302 struct drm_pending_vblank_event *event;
303 struct drm_device *dev = rcrtc->crtc.dev;
304 unsigned long flags;
305
306 spin_lock_irqsave(&dev->event_lock, flags);
307 event = rcrtc->event;
308 rcrtc->event = NULL;
309 spin_unlock_irqrestore(&dev->event_lock, flags);
310
311 if (event == NULL)
312 return;
313
314 spin_lock_irqsave(&dev->event_lock, flags);
315 drm_send_vblank_event(dev, rcrtc->index, event);
316 wake_up(&rcrtc->flip_wait);
317 spin_unlock_irqrestore(&dev->event_lock, flags);
318
319 drm_crtc_vblank_put(&rcrtc->crtc);
320}
321
322static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
323{
324 struct drm_device *dev = rcrtc->crtc.dev;
325 unsigned long flags;
326 bool pending;
327
328 spin_lock_irqsave(&dev->event_lock, flags);
329 pending = rcrtc->event != NULL;
330 spin_unlock_irqrestore(&dev->event_lock, flags);
331
332 return pending;
333}
334
335static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
336{
337 struct rcar_du_device *rcdu = rcrtc->group->dev;
338
339 if (wait_event_timeout(rcrtc->flip_wait,
340 !rcar_du_crtc_page_flip_pending(rcrtc),
341 msecs_to_jiffies(50)))
342 return;
343
344 dev_warn(rcdu->dev, "page flip timeout\n");
345
346 rcar_du_crtc_finish_page_flip(rcrtc);
347}
348
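[Editor's note] rcar_du_crtc_wait_page_flip() bounds the wait at 50 ms and forces completion on timeout so userspace is never left hanging. A rough standalone model of that wait-with-deadline, polling a flag instead of a kernel wait queue:

#include <stdio.h>
#include <time.h>

static volatile int flip_pending = 1;

static void fake_vblank_irq(void)
{
	flip_pending = 0;   /* the IRQ handler would complete the flip */
}

static int wait_page_flip(int timeout_ms)
{
	struct timespec delay = { 0, 1000 * 1000 }; /* 1 ms */

	while (timeout_ms-- > 0) {
		if (!flip_pending)
			return 0;
		if (timeout_ms == 25)
			fake_vblank_irq();  /* simulate the interrupt */
		nanosleep(&delay, NULL);
	}
	return -1;
}

int main(void)
{
	if (wait_page_flip(50))
		fprintf(stderr, "page flip timeout, forcing completion\n");
	else
		printf("page flip completed\n");
	return 0;
}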
349/* -----------------------------------------------------------------------------
350 * Start/Stop and Suspend/Resume
351 */
352
259static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) 353static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
260{ 354{
261 struct drm_crtc *crtc = &rcrtc->crtc; 355 struct drm_crtc *crtc = &rcrtc->crtc;
262 bool interlaced; 356 bool interlaced;
263 unsigned int i;
264 357
265 if (rcrtc->started) 358 if (rcrtc->started)
266 return; 359 return;
267 360
268 if (WARN_ON(rcrtc->plane->format == NULL))
269 return;
270
271 /* Set display off and background to black */ 361 /* Set display off and background to black */
272 rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0)); 362 rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
273 rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0)); 363 rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
@@ -276,20 +366,8 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
276 rcar_du_crtc_set_display_timing(rcrtc); 366 rcar_du_crtc_set_display_timing(rcrtc);
277 rcar_du_group_set_routing(rcrtc->group); 367 rcar_du_group_set_routing(rcrtc->group);
278 368
279 mutex_lock(&rcrtc->group->planes.lock); 369 /* Start with all planes disabled. */
280 rcrtc->plane->enabled = true; 370 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
281 rcar_du_crtc_update_planes(crtc);
282 mutex_unlock(&rcrtc->group->planes.lock);
283
284 /* Setup planes. */
285 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
286 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
287
288 if (plane->crtc != crtc || !plane->enabled)
289 continue;
290
291 rcar_du_plane_setup(plane);
292 }
293 371
294 /* Select master sync mode. This enables display operation in master 372 /* Select master sync mode. This enables display operation in master
295 * sync mode (with the HSYNC and VSYNC signals configured as outputs and 373 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
@@ -302,6 +380,9 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
302 380
303 rcar_du_group_start_stop(rcrtc->group, true); 381 rcar_du_group_start_stop(rcrtc->group, true);
304 382
383 /* Turn vertical blanking interrupt reporting back on. */
384 drm_crtc_vblank_on(crtc);
385
305 rcrtc->started = true; 386 rcrtc->started = true;
306} 387}
307 388
@@ -312,10 +393,12 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
312 if (!rcrtc->started) 393 if (!rcrtc->started)
313 return; 394 return;
314 395
315 mutex_lock(&rcrtc->group->planes.lock); 396 /* Disable vertical blanking interrupt reporting. We first need to wait
316 rcrtc->plane->enabled = false; 397 * for page flip completion before stopping the CRTC as userspace
317 rcar_du_crtc_update_planes(crtc); 398 * expects page flips to eventually complete.
318 mutex_unlock(&rcrtc->group->planes.lock); 399 */
400 rcar_du_crtc_wait_page_flip(rcrtc);
401 drm_crtc_vblank_off(crtc);
319 402
320 /* Select switch sync mode. This stops display operation and configures 403 /* Select switch sync mode. This stops display operation and configures
321 * the HSYNC and VSYNC signals as inputs. 404 * the HSYNC and VSYNC signals as inputs.
@@ -335,196 +418,111 @@ void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
335 418
336void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) 419void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
337{ 420{
338 if (rcrtc->dpms != DRM_MODE_DPMS_ON) 421 unsigned int i;
422
423 if (!rcrtc->enabled)
339 return; 424 return;
340 425
341 rcar_du_crtc_get(rcrtc); 426 rcar_du_crtc_get(rcrtc);
342 rcar_du_crtc_start(rcrtc); 427 rcar_du_crtc_start(rcrtc);
343}
344
345static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
346{
347 struct drm_crtc *crtc = &rcrtc->crtc;
348
349 rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
350 rcar_du_plane_update_base(rcrtc->plane);
351}
352
353static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
354{
355 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
356 428
357 if (mode != DRM_MODE_DPMS_ON) 429 /* Commit the planes state. */
358 mode = DRM_MODE_DPMS_OFF; 430 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
431 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
359 432
360 if (rcrtc->dpms == mode) 433 if (plane->plane.state->crtc != &rcrtc->crtc)
361 return; 434 continue;
362 435
363 if (mode == DRM_MODE_DPMS_ON) { 436 rcar_du_plane_setup(plane);
364 rcar_du_crtc_get(rcrtc);
365 rcar_du_crtc_start(rcrtc);
366 } else {
367 rcar_du_crtc_stop(rcrtc);
368 rcar_du_crtc_put(rcrtc);
369 } 437 }
370 438
371 rcrtc->dpms = mode; 439 rcar_du_crtc_update_planes(rcrtc);
372} 440}
373 441
374static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc, 442/* -----------------------------------------------------------------------------
375 const struct drm_display_mode *mode, 443 * CRTC Functions
376 struct drm_display_mode *adjusted_mode) 444 */
377{
378 /* TODO Fixup modes */
379 return true;
380}
381 445
382static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc) 446static void rcar_du_crtc_enable(struct drm_crtc *crtc)
383{ 447{
384 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 448 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
385 449
386 /* We need to access the hardware during mode set, acquire a reference 450 if (rcrtc->enabled)
387 * to the CRTC. 451 return;
388 */
389 rcar_du_crtc_get(rcrtc);
390 452
391 /* Stop the CRTC and release the plane. Force the DPMS mode to off as a 453 rcar_du_crtc_get(rcrtc);
392 * result. 454 rcar_du_crtc_start(rcrtc);
393 */
394 rcar_du_crtc_stop(rcrtc);
395 rcar_du_plane_release(rcrtc->plane);
396 455
397 rcrtc->dpms = DRM_MODE_DPMS_OFF; 456 rcrtc->enabled = true;
398} 457}
399 458
400static int rcar_du_crtc_mode_set(struct drm_crtc *crtc, 459static void rcar_du_crtc_disable(struct drm_crtc *crtc)
401 struct drm_display_mode *mode,
402 struct drm_display_mode *adjusted_mode,
403 int x, int y,
404 struct drm_framebuffer *old_fb)
405{ 460{
406 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 461 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
407 struct rcar_du_device *rcdu = rcrtc->group->dev;
408 const struct rcar_du_format_info *format;
409 int ret;
410
411 format = rcar_du_format_info(crtc->primary->fb->pixel_format);
412 if (format == NULL) {
413 dev_dbg(rcdu->dev, "mode_set: unsupported format %08x\n",
414 crtc->primary->fb->pixel_format);
415 ret = -EINVAL;
416 goto error;
417 }
418 462
419 ret = rcar_du_plane_reserve(rcrtc->plane, format); 463 if (!rcrtc->enabled)
420 if (ret < 0) 464 return;
421 goto error;
422
423 rcrtc->plane->format = format;
424
425 rcrtc->plane->src_x = x;
426 rcrtc->plane->src_y = y;
427 rcrtc->plane->width = mode->hdisplay;
428 rcrtc->plane->height = mode->vdisplay;
429 465
430 rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb); 466 rcar_du_crtc_stop(rcrtc);
467 rcar_du_crtc_put(rcrtc);
431 468
469 rcrtc->enabled = false;
432 rcrtc->outputs = 0; 470 rcrtc->outputs = 0;
433
434 return 0;
435
436error:
437 /* There's no rollback/abort operation to clean up in case of error. We
438 * thus need to release the reference to the CRTC acquired in prepare()
439 * here.
440 */
441 rcar_du_crtc_put(rcrtc);
442 return ret;
443} 471}
444 472
445static void rcar_du_crtc_mode_commit(struct drm_crtc *crtc) 473static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
474 const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
 {
-	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
-
-	/* We're done, restart the CRTC and set the DPMS mode to on. The
-	 * reference to the DU acquired at prepare() time will thus be released
-	 * by the DPMS handler (possibly called by the disable() handler).
-	 */
-	rcar_du_crtc_start(rcrtc);
-	rcrtc->dpms = DRM_MODE_DPMS_ON;
+	/* TODO Fixup modes */
+	return true;
 }
 
-static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-				      struct drm_framebuffer *old_fb)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
 {
+	struct drm_pending_vblank_event *event = crtc->state->event;
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+	struct drm_device *dev = rcrtc->crtc.dev;
+	unsigned long flags;
 
-	rcrtc->plane->src_x = x;
-	rcrtc->plane->src_y = y;
-
-	rcar_du_crtc_update_base(rcrtc);
-
-	return 0;
+	if (event) {
+		event->pipe = rcrtc->index;
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		rcrtc->event = event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
 }
 
-static void rcar_du_crtc_disable(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
 {
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
-	rcar_du_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-	rcar_du_plane_release(rcrtc->plane);
+	rcar_du_crtc_update_planes(rcrtc);
 }
 
 static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
-	.dpms = rcar_du_crtc_dpms,
 	.mode_fixup = rcar_du_crtc_mode_fixup,
-	.prepare = rcar_du_crtc_mode_prepare,
-	.commit = rcar_du_crtc_mode_commit,
-	.mode_set = rcar_du_crtc_mode_set,
-	.mode_set_base = rcar_du_crtc_mode_set_base,
 	.disable = rcar_du_crtc_disable,
+	.enable = rcar_du_crtc_enable,
+	.atomic_begin = rcar_du_crtc_atomic_begin,
+	.atomic_flush = rcar_du_crtc_atomic_flush,
 };
 
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
-				   struct drm_file *file)
-{
-	struct drm_pending_vblank_event *event;
-	struct drm_device *dev = rcrtc->crtc.dev;
-	unsigned long flags;
-
-	/* Destroy the pending vertical blanking event associated with the
-	 * pending page flip, if any, and disable vertical blanking interrupts.
-	 */
-	spin_lock_irqsave(&dev->event_lock, flags);
-	event = rcrtc->event;
-	if (event && event->base.file_priv == file) {
-		rcrtc->event = NULL;
-		event->base.destroy(&event->base);
-		drm_vblank_put(dev, rcrtc->index);
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
-{
-	struct drm_pending_vblank_event *event;
-	struct drm_device *dev = rcrtc->crtc.dev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	event = rcrtc->event;
-	rcrtc->event = NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	if (event == NULL)
-		return;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	drm_send_vblank_event(dev, rcrtc->index, event);
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	drm_vblank_put(dev, rcrtc->index);
-}
+static const struct drm_crtc_funcs crtc_funcs = {
+	.reset = drm_atomic_helper_crtc_reset,
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
 
 static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
 {
@@ -544,41 +542,9 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
 	return ret;
 }
 
-static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
-				  struct drm_framebuffer *fb,
-				  struct drm_pending_vblank_event *event,
-				  uint32_t page_flip_flags)
-{
-	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
-	struct drm_device *dev = rcrtc->crtc.dev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (rcrtc->event != NULL) {
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		return -EBUSY;
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	crtc->primary->fb = fb;
-	rcar_du_crtc_update_base(rcrtc);
-
-	if (event) {
-		event->pipe = rcrtc->index;
-		drm_vblank_get(dev, rcrtc->index);
-		spin_lock_irqsave(&dev->event_lock, flags);
-		rcrtc->event = event;
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-	}
-
-	return 0;
-}
-
-static const struct drm_crtc_funcs crtc_funcs = {
-	.destroy = drm_crtc_cleanup,
-	.set_config = drm_crtc_helper_set_config,
-	.page_flip = rcar_du_crtc_page_flip,
-};
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
 
 int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 {
@@ -620,20 +586,24 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 		return -EPROBE_DEFER;
 	}
 
+	init_waitqueue_head(&rcrtc->flip_wait);
+
 	rcrtc->group = rgrp;
 	rcrtc->mmio_offset = mmio_offsets[index];
 	rcrtc->index = index;
-	rcrtc->dpms = DRM_MODE_DPMS_OFF;
-	rcrtc->plane = &rgrp->planes.planes[index % 2];
-
-	rcrtc->plane->crtc = crtc;
+	rcrtc->enabled = false;
 
-	ret = drm_crtc_init(rcdu->ddev, crtc, &crtc_funcs);
+	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc,
+					&rgrp->planes.planes[index % 2].plane,
+					NULL, &crtc_funcs);
 	if (ret < 0)
 		return ret;
 
 	drm_crtc_helper_add(crtc, &crtc_helper_funcs);
 
+	/* Start with vertical blanking interrupt reporting disabled. */
+	drm_crtc_vblank_off(crtc);
+
 	/* Register the interrupt handler. */
 	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
 		irq = platform_get_irq(pdev, index);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index d2f89f7d2e5e..5d9aa9b33769 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,12 +15,12 @@
 #define __RCAR_DU_CRTC_H__
 
 #include <linux/mutex.h>
+#include <linux/wait.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 
 struct rcar_du_group;
-struct rcar_du_plane;
 
 struct rcar_du_crtc {
 	struct drm_crtc crtc;
@@ -32,11 +32,12 @@ struct rcar_du_crtc {
 	bool started;
 
 	struct drm_pending_vblank_event *event;
+	wait_queue_head_t flip_wait;
+
 	unsigned int outputs;
-	int dpms;
+	bool enabled;
 
 	struct rcar_du_group *group;
-	struct rcar_du_plane *plane;
 };
 
 #define to_rcar_crtc(c)	container_of(c, struct rcar_du_crtc, crtc)
@@ -59,6 +60,5 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
 
 void rcar_du_crtc_route_output(struct drm_crtc *crtc,
 			       enum rcar_du_output output);
-void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
 
 #endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index e0d74f821416..1d9e4f8568ae 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
+#include <linux/wait.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -163,6 +164,8 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
 		return -ENOMEM;
 	}
 
+	init_waitqueue_head(&rcdu->commit.wait);
+
 	rcdu->dev = &pdev->dev;
 	rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data
 			: (void *)platform_get_device_id(pdev)->driver_data;
@@ -175,17 +178,19 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
 	if (IS_ERR(rcdu->mmio))
 		return PTR_ERR(rcdu->mmio);
 
-	/* DRM/KMS objects */
-	ret = rcar_du_modeset_init(rcdu);
+	/* Initialize vertical blanking interrupts handling. Start with vblank
+	 * disabled for all CRTCs.
+	 */
+	ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
+		dev_err(&pdev->dev, "failed to initialize vblank\n");
 		goto done;
 	}
 
-	/* vblank handling */
-	ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+	/* DRM/KMS objects */
+	ret = rcar_du_modeset_init(rcdu);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to initialize vblank\n");
+		dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
 		goto done;
 	}
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index c5b9ea6a7eaa..c7c538dd2e68 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,6 +15,7 @@
 #define __RCAR_DU_DRV_H__
 
 #include <linux/kernel.h>
+#include <linux/wait.h>
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_group.h"
@@ -64,6 +65,10 @@ struct rcar_du_device_info {
 	unsigned int num_lvds;
 };
 
+#define RCAR_DU_MAX_CRTCS	3
+#define RCAR_DU_MAX_GROUPS	DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
+#define RCAR_DU_MAX_LVDS	2
+
 struct rcar_du_device {
 	struct device *dev;
 	const struct rcar_du_device_info *info;
@@ -73,13 +78,18 @@ struct rcar_du_device {
 	struct drm_device *ddev;
 	struct drm_fbdev_cma *fbdev;
 
-	struct rcar_du_crtc crtcs[3];
+	struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
 	unsigned int num_crtcs;
 
-	struct rcar_du_group groups[2];
+	struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
 
 	unsigned int dpad0_source;
-	struct rcar_du_lvdsenc *lvds[2];
+	struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS];
+
+	struct {
+		wait_queue_head_t wait;
+		u32 pending;
+	} commit;
 };
 
 static inline bool rcar_du_has(struct rcar_du_device *rcdu,
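The commit.wait / commit.pending pair added above implements per-CRTC commit throttling: rcar_du_atomic_commit() (added later in this series, in rcar_du_kms.c) waits until none of the affected CRTCs has a commit in flight, marks them pending, and rcar_du_atomic_complete() clears the bits and wakes waiters. A user-space sketch of the same protocol, with a pthread mutex and condition variable standing in for the kernel wait queue — all names are illustrative; build with -pthread:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t commit_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t commit_wait = PTHREAD_COND_INITIALIZER;
static uint32_t commit_pending;	/* one bit per CRTC with a commit in flight */

/* Block until none of the CRTCs in 'crtcs' is busy, then claim them. */
void commit_begin(uint32_t crtcs)
{
	pthread_mutex_lock(&commit_lock);
	while (commit_pending & crtcs)
		pthread_cond_wait(&commit_wait, &commit_lock);
	commit_pending |= crtcs;
	pthread_mutex_unlock(&commit_lock);
}

/* Release the CRTCs and wake up any waiting commit. */
void commit_complete(uint32_t crtcs)
{
	pthread_mutex_lock(&commit_lock);
	commit_pending &= ~crtcs;
	pthread_cond_broadcast(&commit_wait);
	pthread_mutex_unlock(&commit_lock);
}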
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 279167f783f6..d0ae1e8009c6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -42,46 +42,40 @@ rcar_du_connector_best_encoder(struct drm_connector *connector)
  * Encoder
  */
 
-static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void rcar_du_encoder_disable(struct drm_encoder *encoder)
 {
 	struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
 
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
+	if (renc->lvds)
+		rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false);
+}
+
+static void rcar_du_encoder_enable(struct drm_encoder *encoder)
+{
+	struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
 
 	if (renc->lvds)
-		rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
+		rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true);
 }
 
-static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
-				       const struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted_mode)
+static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state)
 {
 	struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+	const struct drm_display_mode *mode = &crtc_state->mode;
 	const struct drm_display_mode *panel_mode;
+	struct drm_connector *connector = conn_state->connector;
 	struct drm_device *dev = encoder->dev;
-	struct drm_connector *connector;
-	bool found = false;
 
 	/* DAC encoders have currently no restriction on the mode. */
 	if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
-		return true;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		dev_dbg(dev->dev, "mode_fixup: no connector found\n");
-		return false;
-	}
+		return 0;
 
 	if (list_empty(&connector->modes)) {
-		dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
-		return false;
+		dev_dbg(dev->dev, "encoder: empty modes list\n");
+		return -EINVAL;
 	}
 
 	panel_mode = list_first_entry(&connector->modes,
@@ -90,7 +84,7 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
 	/* We're not allowed to modify the resolution. */
 	if (mode->hdisplay != panel_mode->hdisplay ||
 	    mode->vdisplay != panel_mode->vdisplay)
-		return false;
+		return -EINVAL;
 
 	/* The flat panel mode is fixed, just copy it to the adjusted mode. */
 	drm_mode_copy(adjusted_mode, panel_mode);
@@ -102,25 +96,7 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
 	adjusted_mode->clock = clamp(adjusted_mode->clock,
 				     30000, 150000);
 
-	return true;
-}
-
-static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
-{
-	struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
-	if (renc->lvds)
-		rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
-				     DRM_MODE_DPMS_OFF);
-}
-
-static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
-{
-	struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
-	if (renc->lvds)
-		rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
-				     DRM_MODE_DPMS_ON);
+	return 0;
 }
 
 static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
@@ -133,11 +109,10 @@ static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-	.dpms = rcar_du_encoder_dpms,
-	.mode_fixup = rcar_du_encoder_mode_fixup,
-	.prepare = rcar_du_encoder_mode_prepare,
-	.commit = rcar_du_encoder_mode_commit,
 	.mode_set = rcar_du_encoder_mode_set,
+	.disable = rcar_du_encoder_disable,
+	.enable = rcar_du_encoder_enable,
+	.atomic_check = rcar_du_encoder_atomic_check,
 };
 
 static const struct drm_encoder_funcs encoder_funcs = {
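The atomic_check handler above replaces mode_fixup: it reports failure with a negative errno instead of a bool, and reaches the connector through conn_state rather than scanning the connector list. Its core check — reject any mode that changes the fixed panel resolution, copy the panel timings, clamp the pixel clock to the internal LVDS PLL range — reduces to logic like this standalone sketch (simplified mode struct, not the kernel's drm_display_mode):

#include <errno.h>

struct mode {
	int hdisplay;
	int vdisplay;
	int clock;	/* pixel clock in kHz */
};

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

/* Reject modes that change the panel resolution, copy the fixed panel
 * timings, and keep the clock inside the 30-150 MHz PLL range. */
static int check_panel_mode(const struct mode *requested,
			    const struct mode *panel, struct mode *adjusted)
{
	if (requested->hdisplay != panel->hdisplay ||
	    requested->vdisplay != panel->vdisplay)
		return -EINVAL;

	*adjusted = *panel;
	adjusted->clock = clamp(adjusted->clock, 30000, 150000);
	return 0;
}

int main(void)
{
	const struct mode panel = { 1024, 768, 65000 };
	struct mode req = { 1024, 768, 200000 };
	struct mode adj;

	/* Succeeds: resolution matches, clock is taken from the panel. */
	return check_panel_mode(&req, &panel, &adj) ? 1 : 0;
}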
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 0c38cdcda4ca..ed36433fbe84 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -14,6 +14,8 @@
 #ifndef __RCAR_DU_GROUP_H__
 #define __RCAR_DU_GROUP_H__
 
+#include <linux/mutex.h>
+
 #include "rcar_du_plane.h"
 
 struct rcar_du_device;
@@ -25,6 +27,7 @@ struct rcar_du_device;
  * @index: group index
  * @use_count: number of users of the group (rcar_du_group_(get|put))
  * @used_crtcs: number of CRTCs currently in use
+ * @lock: protects the DPTSR register
  * @planes: planes handled by the group
  */
 struct rcar_du_group {
@@ -35,6 +38,8 @@ struct rcar_du_group {
 	unsigned int use_count;
 	unsigned int used_crtcs;
 
+	struct mutex lock;
+
 	struct rcar_du_planes planes;
 };
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index ca94b029ac80..96f2eb43713c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -12,6 +12,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder_slave.h>
@@ -74,10 +75,13 @@ rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
 }
 
 static const struct drm_connector_funcs connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
+	.reset = drm_atomic_helper_connector_reset,
 	.detect = rcar_du_hdmi_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = rcar_du_hdmi_connector_destroy,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
@@ -108,7 +112,7 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
 	if (ret < 0)
 		return ret;
 
-	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	connector->dpms = DRM_MODE_DPMS_OFF;
 	drm_object_property_set_value(&connector->base,
 		rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
 
@@ -116,7 +120,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
 	if (ret < 0)
 		return ret;
 
-	connector->encoder = encoder;
 	rcon->encoder = renc;
 
 	return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 221f0a17fd6a..81da8419282b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -26,41 +26,50 @@
 struct rcar_du_hdmienc {
 	struct rcar_du_encoder *renc;
 	struct device *dev;
-	int dpms;
+	bool enabled;
 };
 
 #define to_rcar_hdmienc(e)	(to_rcar_encoder(e)->hdmi)
 #define to_slave_funcs(e)	(to_rcar_encoder(e)->slave.slave_funcs)
 
-static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode)
+static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
 {
 	struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
 	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
+	if (sfuncs->dpms)
+		sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
 
-	if (hdmienc->dpms == mode)
-		return;
+	if (hdmienc->renc->lvds)
+		rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
+				       false);
 
-	if (mode == DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
-		rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+	hdmienc->enabled = false;
+}
 
-	if (sfuncs->dpms)
-		sfuncs->dpms(encoder, mode);
+static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
+{
+	struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+	if (hdmienc->renc->lvds)
+		rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
+				       true);
 
-	if (mode != DRM_MODE_DPMS_ON && hdmienc->renc->lvds)
-		rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode);
+	if (sfuncs->dpms)
+		sfuncs->dpms(encoder, DRM_MODE_DPMS_ON);
 
-	hdmienc->dpms = mode;
+	hdmienc->enabled = true;
 }
 
-static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
-				       const struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted_mode)
+static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state)
 {
 	struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
 	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+	const struct drm_display_mode *mode = &crtc_state->mode;
 
 	/* The internal LVDS encoder has a clock frequency operating range of
 	 * 30MHz to 150MHz. Clamp the clock accordingly.
@@ -70,19 +79,9 @@ static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
 				     30000, 150000);
 
 	if (sfuncs->mode_fixup == NULL)
-		return true;
-
-	return sfuncs->mode_fixup(encoder, mode, adjusted_mode);
-}
+		return 0;
 
-static void rcar_du_hdmienc_mode_prepare(struct drm_encoder *encoder)
-{
-	rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void rcar_du_hdmienc_mode_commit(struct drm_encoder *encoder)
-{
-	rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_ON);
+	return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL;
 }
 
 static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
@@ -99,18 +98,18 @@ static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-	.dpms = rcar_du_hdmienc_dpms,
-	.mode_fixup = rcar_du_hdmienc_mode_fixup,
-	.prepare = rcar_du_hdmienc_mode_prepare,
-	.commit = rcar_du_hdmienc_mode_commit,
 	.mode_set = rcar_du_hdmienc_mode_set,
+	.disable = rcar_du_hdmienc_disable,
+	.enable = rcar_du_hdmienc_enable,
+	.atomic_check = rcar_du_hdmienc_atomic_check,
 };
 
 static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
 {
 	struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
 
-	rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+	if (hdmienc->enabled)
+		rcar_du_hdmienc_disable(encoder);
 
 	drm_encoder_cleanup(encoder);
 	put_device(hdmienc->dev);
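The HDMI encoder still drives an I2C slave encoder that only understands legacy DPMS levels, so the patch folds the DPMS call into the atomic enable/disable pair and keeps an enabled flag so cleanup disables the hardware at most once. A standalone sketch of that bridging pattern — the slave_ops type and names are illustrative, not a kernel API:

#include <stdbool.h>

enum { DPMS_ON, DPMS_OFF };	/* legacy power levels */

struct slave_ops {
	void (*dpms)(int mode);	/* optional legacy callback */
};

struct encoder {
	const struct slave_ops *ops;
	bool enabled;
};

static void encoder_enable(struct encoder *enc)
{
	if (enc->ops->dpms)
		enc->ops->dpms(DPMS_ON);
	enc->enabled = true;
}

static void encoder_disable(struct encoder *enc)
{
	if (enc->ops->dpms)
		enc->ops->dpms(DPMS_OFF);
	enc->enabled = false;
}

/* Cleanup path: disable only if still enabled, mirroring
 * rcar_du_hdmienc_cleanup() above. */
static void encoder_cleanup(struct encoder *enc)
{
	if (enc->enabled)
		encoder_disable(enc);
}

int main(void)
{
	const struct slave_ops ops = { 0 };	/* slave without a dpms hook */
	struct encoder enc = { &ops, false };

	encoder_enable(&enc);
	encoder_cleanup(&enc);
	return enc.enabled;	/* 0: left disabled exactly once */
}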
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index cc9136e8ee9c..fb052bca574f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -12,12 +12,15 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 
 #include <linux/of_graph.h>
+#include <linux/wait.h>
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_drv.h"
@@ -185,9 +188,309 @@ static void rcar_du_output_poll_changed(struct drm_device *dev)
 	drm_fbdev_cma_hotplug_event(rcdu->fbdev);
 }
 
+/* -----------------------------------------------------------------------------
+ * Atomic Check and Update
+ */
+
+/*
+ * Atomic hardware plane allocator
+ *
+ * The hardware plane allocator is solely based on the atomic plane states
+ * without keeping any external state to avoid races between .atomic_check()
+ * and .atomic_commit().
+ *
+ * The core idea is to avoid using a free planes bitmask that would need to be
+ * shared between check and commit handlers with a collective knowledge based on
+ * the allocated hardware plane(s) for each KMS plane. The allocator then loops
+ * over all plane states to compute the free planes bitmask, allocates hardware
+ * planes based on that bitmask, and stores the result back in the plane states.
+ *
+ * For this to work we need to access the current state of planes not touched by
+ * the atomic update. To ensure that it won't be modified, we need to lock all
+ * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
+ * updates from .atomic_check() up to completion (when swapping the states if
+ * the check step has succeeded) or rollback (when freeing the states if the
+ * check step has failed).
+ *
+ * Allocation is performed in the .atomic_check() handler and applied
+ * automatically when the core swaps the old and new states.
+ */
+
+static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
+					struct rcar_du_plane_state *state)
+{
+	const struct rcar_du_format_info *cur_format;
+
+	cur_format = to_rcar_du_plane_state(plane->plane.state)->format;
+
+	/* Lowering the number of planes doesn't strictly require reallocation
+	 * as the extra hardware plane will be freed when committing, but doing
+	 * so could lead to more fragmentation.
+	 */
+	return !cur_format || cur_format->planes != state->format->planes;
+}
+
+static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
+{
+	unsigned int mask;
+
+	if (state->hwindex == -1)
+		return 0;
+
+	mask = 1 << state->hwindex;
+	if (state->format->planes == 2)
+		mask |= 1 << ((state->hwindex + 1) % 8);
+
+	return mask;
+}
+
+static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
+{
+	unsigned int i;
+
+	for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
+		if (!(free & (1 << i)))
+			continue;
+
+		if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
+			break;
+	}
+
+	return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
+}
+
+static int rcar_du_atomic_check(struct drm_device *dev,
+				struct drm_atomic_state *state)
+{
+	struct rcar_du_device *rcdu = dev->dev_private;
+	unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+	unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+	bool needs_realloc = false;
+	unsigned int groups = 0;
+	unsigned int i;
+	int ret;
+
+	ret = drm_atomic_helper_check(dev, state);
+	if (ret < 0)
+		return ret;
+
+	/* Check if hardware planes need to be reallocated. */
+	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+		struct rcar_du_plane_state *plane_state;
+		struct rcar_du_plane *plane;
+		unsigned int index;
+
+		if (!state->planes[i])
+			continue;
+
+		plane = to_rcar_plane(state->planes[i]);
+		plane_state = to_rcar_du_plane_state(state->plane_states[i]);
+
+		/* If the plane is being disabled we don't need to go through
+		 * the full reallocation procedure. Just mark the hardware
+		 * plane(s) as freed.
+		 */
+		if (!plane_state->format) {
+			index = plane - plane->group->planes.planes;
+			group_freed_planes[plane->group->index] |= 1 << index;
+			plane_state->hwindex = -1;
+			continue;
+		}
+
+		/* If the plane needs to be reallocated mark it as such, and
+		 * mark the hardware plane(s) as free.
+		 */
+		if (rcar_du_plane_needs_realloc(plane, plane_state)) {
+			groups |= 1 << plane->group->index;
+			needs_realloc = true;
+
+			index = plane - plane->group->planes.planes;
+			group_freed_planes[plane->group->index] |= 1 << index;
+			plane_state->hwindex = -1;
+		}
+	}
+
+	if (!needs_realloc)
+		return 0;
+
+	/* Grab all plane states for the groups that need reallocation to ensure
+	 * locking and avoid racy updates. This serializes the update operation,
+	 * but there's not much we can do about it as that's the hardware
+	 * design.
+	 *
+	 * Compute the used planes mask for each group at the same time to avoid
+	 * looping over the planes separately later.
+	 */
+	while (groups) {
+		unsigned int index = ffs(groups) - 1;
+		struct rcar_du_group *group = &rcdu->groups[index];
+		unsigned int used_planes = 0;
+
+		for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
+			struct rcar_du_plane *plane = &group->planes.planes[i];
+			struct rcar_du_plane_state *plane_state;
+			struct drm_plane_state *s;
+
+			s = drm_atomic_get_plane_state(state, &plane->plane);
+			if (IS_ERR(s))
+				return PTR_ERR(s);
+
+			/* If the plane has been freed in the above loop its
+			 * hardware planes must not be added to the used planes
+			 * bitmask. However, the current state doesn't reflect
+			 * the free state yet, as we've modified the new state
+			 * above. Use the local freed planes list to check for
+			 * that condition instead.
+			 */
+			if (group_freed_planes[index] & (1 << i))
+				continue;
+
+			plane_state = to_rcar_du_plane_state(plane->plane.state);
+			used_planes |= rcar_du_plane_hwmask(plane_state);
+		}
+
+		group_free_planes[index] = 0xff & ~used_planes;
+		groups &= ~(1 << index);
+	}
+
+	/* Reallocate hardware planes for each plane that needs it. */
+	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+		struct rcar_du_plane_state *plane_state;
+		struct rcar_du_plane *plane;
+		int idx;
+
+		if (!state->planes[i])
+			continue;
+
+		plane = to_rcar_plane(state->planes[i]);
+		plane_state = to_rcar_du_plane_state(state->plane_states[i]);
+
+		/* Skip planes that are being disabled or don't need to be
+		 * reallocated.
+		 */
+		if (!plane_state->format ||
+		    !rcar_du_plane_needs_realloc(plane, plane_state))
+			continue;
+
+		idx = rcar_du_plane_hwalloc(plane_state->format->planes,
+					group_free_planes[plane->group->index]);
+		if (idx < 0) {
+			dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
+				__func__);
+			return idx;
+		}
+
+		plane_state->hwindex = idx;
+
+		group_free_planes[plane->group->index] &=
+			~rcar_du_plane_hwmask(plane_state);
+	}
+
+	return 0;
+}
+
+struct rcar_du_commit {
+	struct work_struct work;
+	struct drm_device *dev;
+	struct drm_atomic_state *state;
+	u32 crtcs;
+};
+
+static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
+{
+	struct drm_device *dev = commit->dev;
+	struct rcar_du_device *rcdu = dev->dev_private;
+	struct drm_atomic_state *old_state = commit->state;
+
+	/* Apply the atomic update. */
+	drm_atomic_helper_commit_modeset_disables(dev, old_state);
+	drm_atomic_helper_commit_modeset_enables(dev, old_state);
+	drm_atomic_helper_commit_planes(dev, old_state);
+
+	drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+	drm_atomic_helper_cleanup_planes(dev, old_state);
+
+	drm_atomic_state_free(old_state);
+
+	/* Complete the commit, wake up any waiter. */
+	spin_lock(&rcdu->commit.wait.lock);
+	rcdu->commit.pending &= ~commit->crtcs;
+	wake_up_all_locked(&rcdu->commit.wait);
+	spin_unlock(&rcdu->commit.wait.lock);
+
+	kfree(commit);
+}
+
+static void rcar_du_atomic_work(struct work_struct *work)
+{
+	struct rcar_du_commit *commit =
+		container_of(work, struct rcar_du_commit, work);
+
+	rcar_du_atomic_complete(commit);
+}
+
+static int rcar_du_atomic_commit(struct drm_device *dev,
+				 struct drm_atomic_state *state, bool async)
+{
+	struct rcar_du_device *rcdu = dev->dev_private;
+	struct rcar_du_commit *commit;
+	unsigned int i;
+	int ret;
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret)
+		return ret;
+
+	/* Allocate the commit object. */
+	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+	if (commit == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&commit->work, rcar_du_atomic_work);
+	commit->dev = dev;
+	commit->state = state;
+
+	/* Wait until all affected CRTCs have completed previous commits and
+	 * mark them as pending.
+	 */
+	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
+		if (state->crtcs[i])
+			commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
+	}
+
+	spin_lock(&rcdu->commit.wait.lock);
+	ret = wait_event_interruptible_locked(rcdu->commit.wait,
+			!(rcdu->commit.pending & commit->crtcs));
+	if (ret == 0)
+		rcdu->commit.pending |= commit->crtcs;
+	spin_unlock(&rcdu->commit.wait.lock);
+
+	if (ret) {
+		kfree(commit);
+		return ret;
+	}
+
+	/* Swap the state, this is the point of no return. */
+	drm_atomic_helper_swap_state(dev, state);
+
+	if (async)
+		schedule_work(&commit->work);
+	else
+		rcar_du_atomic_complete(commit);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
 static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
 	.fb_create = rcar_du_fb_create,
 	.output_poll_changed = rcar_du_output_poll_changed,
+	.atomic_check = rcar_du_atomic_check,
+	.atomic_commit = rcar_du_atomic_commit,
 };
 
 static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
@@ -392,6 +695,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	for (i = 0; i < num_groups; ++i) {
 		struct rcar_du_group *rgrp = &rcdu->groups[i];
 
+		mutex_init(&rgrp->lock);
+
 		rgrp->dev = rcdu;
 		rgrp->mmio_offset = mmio_offsets[i];
 		rgrp->index = i;
@@ -439,27 +744,21 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 		encoder->possible_clones = (1 << num_encoders) - 1;
 	}
 
-	/* Now that the CRTCs have been initialized register the planes. */
-	for (i = 0; i < num_groups; ++i) {
-		ret = rcar_du_planes_register(&rcdu->groups[i]);
-		if (ret < 0)
-			return ret;
-	}
+	drm_mode_config_reset(dev);
 
 	drm_kms_helper_poll_init(dev);
 
-	drm_helper_disable_unused_functions(dev);
-
-	fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
-				   dev->mode_config.num_connector);
-	if (IS_ERR(fbdev))
-		return PTR_ERR(fbdev);
+	if (dev->mode_config.num_connector) {
+		fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
+					   dev->mode_config.num_connector);
+		if (IS_ERR(fbdev))
+			return PTR_ERR(fbdev);
 
-#ifndef CONFIG_FRAMEBUFFER_CONSOLE
-	drm_fbdev_cma_restore_mode(fbdev);
-#endif
-
-	rcdu->fbdev = fbdev;
+		rcdu->fbdev = fbdev;
+	} else {
+		dev_info(rcdu->dev,
+			 "no connector found, disabling fbdev emulation\n");
+	}
 
 	return 0;
 }
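The allocator introduced above works purely on bitmasks: a KMS plane occupies one hardware plane, or two consecutive ones (modulo 8) for two-plane formats such as NV12. The mask and search helpers can be exercised in isolation; this standalone sketch mirrors the logic of rcar_du_plane_hwmask() and rcar_du_plane_hwalloc() with plain integers (returning -1 rather than -EBUSY on failure):

#include <stdio.h>

#define NUM_HW_PLANES 8

/* Bitmask of hardware planes used by an allocation at 'index'. */
static unsigned int hwmask(int index, unsigned int num_planes)
{
	unsigned int mask;

	if (index < 0)
		return 0;

	mask = 1 << index;
	if (num_planes == 2)
		mask |= 1 << ((index + 1) % 8);
	return mask;
}

/* Find a free hardware plane (or a consecutive pair, wrapping at 8). */
static int hwalloc(unsigned int num_planes, unsigned int free)
{
	unsigned int i;

	for (i = 0; i < NUM_HW_PLANES; ++i) {
		if (!(free & (1 << i)))
			continue;
		if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
			return i;
	}
	return -1;
}

int main(void)
{
	unsigned int free = 0xff;	/* all eight hardware planes free */
	int idx;

	idx = hwalloc(2, free);		/* two-plane format, e.g. NV12 */
	free &= ~hwmask(idx, 2);
	printf("allocated pair at %d, free mask now %#x\n", idx, free);

	idx = hwalloc(1, free);		/* single-plane format */
	printf("allocated single at %d\n", idx);
	return 0;
}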
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 6d9811c052c4..0c43032fc693 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -12,6 +12,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 
@@ -74,10 +75,13 @@ rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
 }
 
 static const struct drm_connector_funcs connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
+	.reset = drm_atomic_helper_connector_reset,
 	.detect = rcar_du_lvds_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = rcar_du_lvds_connector_destroy,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
@@ -117,7 +121,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
 	if (ret < 0)
 		return ret;
 
-	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	connector->dpms = DRM_MODE_DPMS_OFF;
 	drm_object_property_set_value(&connector->base,
 		rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
 
@@ -125,7 +129,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
 	if (ret < 0)
 		return ret;
 
-	connector->encoder = encoder;
 	lvdscon->connector.encoder = renc;
 
 	return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index 7cfb48ce1791..85043c5bad03 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -28,7 +28,7 @@ struct rcar_du_lvdsenc {
 	unsigned int index;
 	void __iomem *mmio;
 	struct clk *clock;
-	int dpms;
+	bool enabled;
 
 	enum rcar_lvds_input input;
 };
@@ -48,7 +48,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
 	u32 pllcr;
 	int ret;
 
-	if (lvds->dpms == DRM_MODE_DPMS_ON)
+	if (lvds->enabled)
 		return 0;
 
 	ret = clk_prepare_enable(lvds->clock);
@@ -110,13 +110,13 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
 	lvdcr0 |= LVDCR0_LVRES;
 	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
 
-	lvds->dpms = DRM_MODE_DPMS_ON;
+	lvds->enabled = true;
 	return 0;
 }
 
 static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
 {
-	if (lvds->dpms == DRM_MODE_DPMS_OFF)
+	if (!lvds->enabled)
 		return;
 
 	rcar_lvds_write(lvds, LVDCR0, 0);
@@ -124,13 +124,13 @@ static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
 
 	clk_disable_unprepare(lvds->clock);
 
-	lvds->dpms = DRM_MODE_DPMS_OFF;
+	lvds->enabled = false;
 }
 
-int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
-			 struct drm_crtc *crtc, int mode)
+int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc,
+			   bool enable)
 {
-	if (mode == DRM_MODE_DPMS_OFF) {
+	if (!enable) {
 		rcar_du_lvdsenc_stop(lvds);
 		return 0;
 	} else if (crtc) {
@@ -179,7 +179,7 @@ int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
 		lvds->dev = rcdu;
 		lvds->index = i;
 		lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
-		lvds->dpms = DRM_MODE_DPMS_OFF;
+		lvds->enabled = false;
 
 		ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
 		if (ret < 0)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index f65aabda0796..9a6001c07303 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -28,15 +28,15 @@ enum rcar_lvds_input {
 
 #if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
 int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
-int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
-			 struct drm_crtc *crtc, int mode);
+int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
+			   struct drm_crtc *crtc, bool enable);
 #else
 static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
 {
 	return 0;
 }
-static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
-					struct drm_crtc *crtc, int mode)
+static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
+					 struct drm_crtc *crtc, bool enable)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 50f2f2b20d39..35a2f04ab799 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -12,10 +12,12 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "rcar_du_drv.h"
 #include "rcar_du_kms.h"
@@ -26,16 +28,6 @@
 #define RCAR_DU_COLORKEY_SOURCE	(1 << 24)
 #define RCAR_DU_COLORKEY_MASK	(1 << 24)
 
-struct rcar_du_kms_plane {
-	struct drm_plane plane;
-	struct rcar_du_plane *hwplane;
-};
-
-static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
-{
-	return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
-}
-
 static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
 			      unsigned int index, u32 reg)
 {
@@ -50,74 +42,31 @@ static void rcar_du_plane_write(struct rcar_du_group *rgrp,
 			    data);
 }
 
-int rcar_du_plane_reserve(struct rcar_du_plane *plane,
-			  const struct rcar_du_format_info *format)
-{
-	struct rcar_du_group *rgrp = plane->group;
-	unsigned int i;
-	int ret = -EBUSY;
-
-	mutex_lock(&rgrp->planes.lock);
-
-	for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
-		if (!(rgrp->planes.free & (1 << i)))
-			continue;
-
-		if (format->planes == 1 ||
-		    rgrp->planes.free & (1 << ((i + 1) % 8)))
-			break;
-	}
-
-	if (i == ARRAY_SIZE(rgrp->planes.planes))
-		goto done;
-
-	rgrp->planes.free &= ~(1 << i);
-	if (format->planes == 2)
-		rgrp->planes.free &= ~(1 << ((i + 1) % 8));
-
-	plane->hwindex = i;
-
-	ret = 0;
-
-done:
-	mutex_unlock(&rgrp->planes.lock);
-	return ret;
-}
-
-void rcar_du_plane_release(struct rcar_du_plane *plane)
-{
-	struct rcar_du_group *rgrp = plane->group;
-
-	if (plane->hwindex == -1)
-		return;
-
-	mutex_lock(&rgrp->planes.lock);
-	rgrp->planes.free |= 1 << plane->hwindex;
-	if (plane->format->planes == 2)
-		rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
-	mutex_unlock(&rgrp->planes.lock);
-
-	plane->hwindex = -1;
-}
-
-void rcar_du_plane_update_base(struct rcar_du_plane *plane)
+static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane)
 {
+	struct rcar_du_plane_state *state =
+		to_rcar_du_plane_state(plane->plane.state);
+	struct drm_framebuffer *fb = plane->plane.state->fb;
 	struct rcar_du_group *rgrp = plane->group;
-	unsigned int index = plane->hwindex;
+	unsigned int src_x = state->state.src_x >> 16;
+	unsigned int src_y = state->state.src_y >> 16;
+	unsigned int index = state->hwindex;
+	struct drm_gem_cma_object *gem;
 	bool interlaced;
 	u32 mwr;
 
-	interlaced = plane->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+	interlaced = state->state.crtc->state->adjusted_mode.flags
+		   & DRM_MODE_FLAG_INTERLACE;
 
 	/* Memory pitch (expressed in pixels). Must be doubled for interlaced
 	 * operation with 32bpp formats.
 	 */
-	if (plane->format->planes == 2)
-		mwr = plane->pitch;
+	if (state->format->planes == 2)
+		mwr = fb->pitches[0];
 	else
-		mwr = plane->pitch * 8 / plane->format->bpp;
+		mwr = fb->pitches[0] * 8 / state->format->bpp;
 
-	if (interlaced && plane->format->bpp == 32)
+	if (interlaced && state->format->bpp == 32)
 		mwr *= 2;
 
 	rcar_du_plane_write(rgrp, index, PnMWR, mwr);
134 * require a halved Y position value, in both progressive and interlaced 83 * require a halved Y position value, in both progressive and interlaced
135 * modes. 84 * modes.
136 */ 85 */
137 rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x); 86 rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
138 rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y * 87 rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
139 (!interlaced && plane->format->bpp == 32 ? 2 : 1)); 88 (!interlaced && state->format->bpp == 32 ? 2 : 1));
140 rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
141 89
142 if (plane->format->planes == 2) { 90 gem = drm_fb_cma_get_gem_obj(fb, 0);
143 index = (index + 1) % 8; 91 rcar_du_plane_write(rgrp, index, PnDSA0R, gem->paddr + fb->offsets[0]);
144
145 rcar_du_plane_write(rgrp, index, PnMWR, plane->pitch);
146 92
147 rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x); 93 if (state->format->planes == 2) {
148 rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y * 94 index = (index + 1) % 8;
149 (plane->format->bpp == 16 ? 2 : 1) / 2);
150 rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
151 }
152}
153 95
154void rcar_du_plane_compute_base(struct rcar_du_plane *plane, 96 rcar_du_plane_write(rgrp, index, PnMWR, fb->pitches[0]);
155 struct drm_framebuffer *fb)
156{
157 struct drm_gem_cma_object *gem;
158 97
159 plane->pitch = fb->pitches[0]; 98 rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
99 rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
100 (state->format->bpp == 16 ? 2 : 1) / 2);
160 101
161 gem = drm_fb_cma_get_gem_obj(fb, 0);
162 plane->dma[0] = gem->paddr + fb->offsets[0];
163
164 if (plane->format->planes == 2) {
165 gem = drm_fb_cma_get_gem_obj(fb, 1); 102 gem = drm_fb_cma_get_gem_obj(fb, 1);
166 plane->dma[1] = gem->paddr + fb->offsets[1]; 103 rcar_du_plane_write(rgrp, index, PnDSA0R,
104 gem->paddr + fb->offsets[1]);
167 } 105 }
168} 106}
169 107
170static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, 108static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
171 unsigned int index) 109 unsigned int index)
172{ 110{
111 struct rcar_du_plane_state *state =
112 to_rcar_du_plane_state(plane->plane.state);
173 struct rcar_du_group *rgrp = plane->group; 113 struct rcar_du_group *rgrp = plane->group;
174 u32 colorkey; 114 u32 colorkey;
175 u32 pnmr; 115 u32 pnmr;
@@ -183,47 +123,47 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
 	 * For XRGB, set the alpha value to the plane-wide alpha value and
 	 * enable alpha-blending regardless of the X bit value.
 	 */
-	if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
+	if (state->format->fourcc != DRM_FORMAT_XRGB1555)
 		rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
 	else
 		rcar_du_plane_write(rgrp, index, PnALPHAR,
-				    PnALPHAR_ABIT_X | plane->alpha);
+				    PnALPHAR_ABIT_X | state->alpha);
 
-	pnmr = PnMR_BM_MD | plane->format->pnmr;
+	pnmr = PnMR_BM_MD | state->format->pnmr;
 
 	/* Disable color keying when requested. YUV formats have the
 	 * PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying
 	 * automatically.
 	 */
-	if ((plane->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
+	if ((state->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
 		pnmr |= PnMR_SPIM_TP_OFF;
 
 	/* For packed YUV formats we need to select the U/V order. */
-	if (plane->format->fourcc == DRM_FORMAT_YUYV)
+	if (state->format->fourcc == DRM_FORMAT_YUYV)
 		pnmr |= PnMR_YCDF_YUYV;
 
 	rcar_du_plane_write(rgrp, index, PnMR, pnmr);
 
-	switch (plane->format->fourcc) {
+	switch (state->format->fourcc) {
 	case DRM_FORMAT_RGB565:
-		colorkey = ((plane->colorkey & 0xf80000) >> 8)
-			 | ((plane->colorkey & 0x00fc00) >> 5)
-			 | ((plane->colorkey & 0x0000f8) >> 3);
+		colorkey = ((state->colorkey & 0xf80000) >> 8)
+			 | ((state->colorkey & 0x00fc00) >> 5)
+			 | ((state->colorkey & 0x0000f8) >> 3);
 		rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
 		break;
 
 	case DRM_FORMAT_ARGB1555:
 	case DRM_FORMAT_XRGB1555:
-		colorkey = ((plane->colorkey & 0xf80000) >> 9)
-			 | ((plane->colorkey & 0x00f800) >> 6)
-			 | ((plane->colorkey & 0x0000f8) >> 3);
+		colorkey = ((state->colorkey & 0xf80000) >> 9)
+			 | ((state->colorkey & 0x00f800) >> 6)
+			 | ((state->colorkey & 0x0000f8) >> 3);
 		rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
 		break;
 
 	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_ARGB8888:
 		rcar_du_plane_write(rgrp, index, PnTC3R,
-				    PnTC3R_CODE | (plane->colorkey & 0xffffff));
+				    PnTC3R_CODE | (state->colorkey & 0xffffff));
 		break;
 	}
 }
@@ -231,6 +171,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
 static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
 				  unsigned int index)
 {
+	struct rcar_du_plane_state *state =
+		to_rcar_du_plane_state(plane->plane.state);
 	struct rcar_du_group *rgrp = plane->group;
 	u32 ddcr2 = PnDDCR2_CODE;
 	u32 ddcr4;
@@ -242,17 +184,17 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
 	 */
 	ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
 	ddcr4 &= ~PnDDCR4_EDF_MASK;
-	ddcr4 |= plane->format->edf | PnDDCR4_CODE;
+	ddcr4 |= state->format->edf | PnDDCR4_CODE;
 
 	rcar_du_plane_setup_mode(plane, index);
 
-	if (plane->format->planes == 2) {
-		if (plane->hwindex != index) {
-			if (plane->format->fourcc == DRM_FORMAT_NV12 ||
-			    plane->format->fourcc == DRM_FORMAT_NV21)
+	if (state->format->planes == 2) {
+		if (state->hwindex != index) {
+			if (state->format->fourcc == DRM_FORMAT_NV12 ||
+			    state->format->fourcc == DRM_FORMAT_NV21)
 				ddcr2 |= PnDDCR2_Y420;
 
-			if (plane->format->fourcc == DRM_FORMAT_NV21)
+			if (state->format->fourcc == DRM_FORMAT_NV21)
 				ddcr2 |= PnDDCR2_NV21;
 
 			ddcr2 |= PnDDCR2_DIVU;
@@ -265,10 +207,10 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
 	rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
 
 	/* Destination position and size */
-	rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
-	rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
-	rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
-	rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
+	rcar_du_plane_write(rgrp, index, PnDSXR, plane->plane.state->crtc_w);
+	rcar_du_plane_write(rgrp, index, PnDSYR, plane->plane.state->crtc_h);
+	rcar_du_plane_write(rgrp, index, PnDPXR, plane->plane.state->crtc_x);
+	rcar_du_plane_write(rgrp, index, PnDPYR, plane->plane.state->crtc_y);
 
 	/* Wrap-around and blinking, disabled */
 	rcar_du_plane_write(rgrp, index, PnWASPR, 0);
@@ -279,150 +221,140 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
 
 void rcar_du_plane_setup(struct rcar_du_plane *plane)
 {
-        __rcar_du_plane_setup(plane, plane->hwindex);
-        if (plane->format->planes == 2)
-                __rcar_du_plane_setup(plane, (plane->hwindex + 1) % 8);
+        struct rcar_du_plane_state *state =
+                to_rcar_du_plane_state(plane->plane.state);
+
+        __rcar_du_plane_setup(plane, state->hwindex);
+        if (state->format->planes == 2)
+                __rcar_du_plane_setup(plane, (state->hwindex + 1) % 8);
 
-        rcar_du_plane_update_base(plane);
+        rcar_du_plane_setup_fb(plane);
 }
 
-static int
-rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
-                     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                     unsigned int crtc_w, unsigned int crtc_h,
-                     uint32_t src_x, uint32_t src_y,
-                     uint32_t src_w, uint32_t src_h)
+static int rcar_du_plane_atomic_check(struct drm_plane *plane,
+                                      struct drm_plane_state *state)
 {
+        struct rcar_du_plane_state *rstate = to_rcar_du_plane_state(state);
         struct rcar_du_plane *rplane = to_rcar_plane(plane);
         struct rcar_du_device *rcdu = rplane->group->dev;
-        const struct rcar_du_format_info *format;
-        unsigned int nplanes;
-        int ret;
 
-        format = rcar_du_format_info(fb->pixel_format);
-        if (format == NULL) {
-                dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
-                        fb->pixel_format);
-                return -EINVAL;
+        if (!state->fb || !state->crtc) {
+                rstate->format = NULL;
+                return 0;
         }
 
-        if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
+        if (state->src_w >> 16 != state->crtc_w ||
+            state->src_h >> 16 != state->crtc_h) {
                 dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
                 return -EINVAL;
         }
 
-        nplanes = rplane->format ? rplane->format->planes : 0;
-
-        /* Reallocate hardware planes if the number of required planes has
-         * changed.
-         */
-        if (format->planes != nplanes) {
-                rcar_du_plane_release(rplane);
-                ret = rcar_du_plane_reserve(rplane, format);
-                if (ret < 0)
-                        return ret;
+        rstate->format = rcar_du_format_info(state->fb->pixel_format);
+        if (rstate->format == NULL) {
+                dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
+                        state->fb->pixel_format);
+                return -EINVAL;
         }
 
-        rplane->crtc = crtc;
-        rplane->format = format;
-
-        rplane->src_x = src_x >> 16;
-        rplane->src_y = src_y >> 16;
-        rplane->dst_x = crtc_x;
-        rplane->dst_y = crtc_y;
-        rplane->width = crtc_w;
-        rplane->height = crtc_h;
-
-        rcar_du_plane_compute_base(rplane, fb);
-        rcar_du_plane_setup(rplane);
-
-        mutex_lock(&rplane->group->planes.lock);
-        rplane->enabled = true;
-        rcar_du_crtc_update_planes(rplane->crtc);
-        mutex_unlock(&rplane->group->planes.lock);
-
         return 0;
 }
 
-static int rcar_du_plane_disable(struct drm_plane *plane)
+static void rcar_du_plane_atomic_update(struct drm_plane *plane,
+                                        struct drm_plane_state *old_state)
 {
         struct rcar_du_plane *rplane = to_rcar_plane(plane);
 
-        if (!rplane->enabled)
-                return 0;
+        if (plane->state->crtc)
+                rcar_du_plane_setup(rplane);
+}
 
-        mutex_lock(&rplane->group->planes.lock);
-        rplane->enabled = false;
-        rcar_du_crtc_update_planes(rplane->crtc);
-        mutex_unlock(&rplane->group->planes.lock);
+static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = {
+        .atomic_check = rcar_du_plane_atomic_check,
+        .atomic_update = rcar_du_plane_atomic_update,
+};
 
-        rcar_du_plane_release(rplane);
+static void rcar_du_plane_reset(struct drm_plane *plane)
+{
+        struct rcar_du_plane_state *state;
 
-        rplane->crtc = NULL;
-        rplane->format = NULL;
+        if (plane->state && plane->state->fb)
+                drm_framebuffer_unreference(plane->state->fb);
 
-        return 0;
-}
+        kfree(plane->state);
+        plane->state = NULL;
 
-/* Both the .set_property and the .update_plane operations are called with the
- * mode_config lock held. There is thus no need to explicitly protect access to
- * the alpha and colorkey fields and the mode register.
- */
-static void rcar_du_plane_set_alpha(struct rcar_du_plane *plane, u32 alpha)
-{
-        if (plane->alpha == alpha)
+        state = kzalloc(sizeof(*state), GFP_KERNEL);
+        if (state == NULL)
                 return;
 
-        plane->alpha = alpha;
-        if (!plane->enabled || plane->format->fourcc != DRM_FORMAT_XRGB1555)
-                return;
+        state->hwindex = -1;
+        state->alpha = 255;
+        state->colorkey = RCAR_DU_COLORKEY_NONE;
+        state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
 
-        rcar_du_plane_setup_mode(plane, plane->hwindex);
+        plane->state = &state->state;
+        plane->state->plane = plane;
 }
 
-static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
-                                       u32 colorkey)
+static struct drm_plane_state *
+rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
 {
-        if (plane->colorkey == colorkey)
-                return;
+        struct rcar_du_plane_state *state;
+        struct rcar_du_plane_state *copy;
 
-        plane->colorkey = colorkey;
-        if (!plane->enabled)
-                return;
+        state = to_rcar_du_plane_state(plane->state);
+        copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+        if (copy == NULL)
+                return NULL;
+
+        if (copy->state.fb)
+                drm_framebuffer_reference(copy->state.fb);
 
-        rcar_du_plane_setup_mode(plane, plane->hwindex);
+        return &copy->state;
 }
 
-static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
-                                   unsigned int zpos)
+static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane,
+                                               struct drm_plane_state *state)
 {
-        mutex_lock(&plane->group->planes.lock);
-        if (plane->zpos == zpos)
-                goto done;
+        kfree(to_rcar_du_plane_state(state));
+}
 
-        plane->zpos = zpos;
-        if (!plane->enabled)
-                goto done;
+static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
+                                             struct drm_plane_state *state,
+                                             struct drm_property *property,
+                                             uint64_t val)
+{
+        struct rcar_du_plane_state *rstate = to_rcar_du_plane_state(state);
+        struct rcar_du_plane *rplane = to_rcar_plane(plane);
+        struct rcar_du_group *rgrp = rplane->group;
 
-        rcar_du_crtc_update_planes(plane->crtc);
+        if (property == rgrp->planes.alpha)
+                rstate->alpha = val;
+        else if (property == rgrp->planes.colorkey)
+                rstate->colorkey = val;
+        else if (property == rgrp->planes.zpos)
+                rstate->zpos = val;
+        else
+                return -EINVAL;
 
-done:
-        mutex_unlock(&plane->group->planes.lock);
+        return 0;
 }
 
-static int rcar_du_plane_set_property(struct drm_plane *plane,
-                                      struct drm_property *property,
-                                      uint64_t value)
+static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
+        const struct drm_plane_state *state, struct drm_property *property,
+        uint64_t *val)
 {
+        const struct rcar_du_plane_state *rstate =
+                container_of(state, const struct rcar_du_plane_state, state);
         struct rcar_du_plane *rplane = to_rcar_plane(plane);
         struct rcar_du_group *rgrp = rplane->group;
 
         if (property == rgrp->planes.alpha)
-                rcar_du_plane_set_alpha(rplane, value);
+                *val = rstate->alpha;
         else if (property == rgrp->planes.colorkey)
-                rcar_du_plane_set_colorkey(rplane, value);
+                *val = rstate->colorkey;
         else if (property == rgrp->planes.zpos)
-                rcar_du_plane_set_zpos(rplane, value);
+                *val = rstate->zpos;
         else
                 return -EINVAL;
 
@@ -430,10 +362,15 @@ static int rcar_du_plane_set_property(struct drm_plane *plane,
 }
 
 static const struct drm_plane_funcs rcar_du_plane_funcs = {
-        .update_plane = rcar_du_plane_update,
-        .disable_plane = rcar_du_plane_disable,
-        .set_property = rcar_du_plane_set_property,
+        .update_plane = drm_atomic_helper_update_plane,
+        .disable_plane = drm_atomic_helper_disable_plane,
+        .reset = rcar_du_plane_reset,
+        .set_property = drm_atomic_helper_plane_set_property,
         .destroy = drm_plane_cleanup,
+        .atomic_duplicate_state = rcar_du_plane_atomic_duplicate_state,
+        .atomic_destroy_state = rcar_du_plane_atomic_destroy_state,
+        .atomic_set_property = rcar_du_plane_atomic_set_property,
+        .atomic_get_property = rcar_du_plane_atomic_get_property,
 };
 
 static const uint32_t formats[] = {
@@ -453,10 +390,11 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
 {
         struct rcar_du_planes *planes = &rgrp->planes;
         struct rcar_du_device *rcdu = rgrp->dev;
+        unsigned int num_planes;
+        unsigned int num_crtcs;
+        unsigned int crtcs;
         unsigned int i;
-
-        mutex_init(&planes->lock);
-        planes->free = 0xff;
+        int ret;
 
         planes->alpha =
                 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
@@ -478,45 +416,34 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
         if (planes->zpos == NULL)
                 return -ENOMEM;
 
-        for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
-                struct rcar_du_plane *plane = &planes->planes[i];
-
-                plane->group = rgrp;
-                plane->hwindex = -1;
-                plane->alpha = 255;
-                plane->colorkey = RCAR_DU_COLORKEY_NONE;
-                plane->zpos = 0;
-        }
-
-        return 0;
-}
-
-int rcar_du_planes_register(struct rcar_du_group *rgrp)
-{
-        struct rcar_du_planes *planes = &rgrp->planes;
-        struct rcar_du_device *rcdu = rgrp->dev;
-        unsigned int crtcs;
-        unsigned int i;
-        int ret;
+        /* Create one primary plane per CRTC in this group and seven overlay
+         * planes.
+         */
+        num_crtcs = min(rcdu->num_crtcs - 2 * rgrp->index, 2U);
+        num_planes = num_crtcs + 7;
 
         crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
 
-        for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
-                struct rcar_du_kms_plane *plane;
-
-                plane = devm_kzalloc(rcdu->dev, sizeof(*plane), GFP_KERNEL);
-                if (plane == NULL)
-                        return -ENOMEM;
+        for (i = 0; i < num_planes; ++i) {
+                enum drm_plane_type type = i < num_crtcs
+                                         ? DRM_PLANE_TYPE_PRIMARY
+                                         : DRM_PLANE_TYPE_OVERLAY;
+                struct rcar_du_plane *plane = &planes->planes[i];
 
-                plane->hwplane = &planes->planes[i + 2];
-                plane->hwplane->zpos = 1;
+                plane->group = rgrp;
 
-                ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
-                                     &rcar_du_plane_funcs, formats,
-                                     ARRAY_SIZE(formats), false);
+                ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
+                                               &rcar_du_plane_funcs, formats,
+                                               ARRAY_SIZE(formats), type);
                 if (ret < 0)
                         return ret;
 
+                drm_plane_helper_add(&plane->plane,
+                                     &rcar_du_plane_helper_funcs);
+
+                if (type == DRM_PLANE_TYPE_PRIMARY)
+                        continue;
+
                 drm_object_attach_property(&plane->plane.base,
                                            planes->alpha, 255);
                 drm_object_attach_property(&plane->plane.base,
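
The conversion above follows the standard pattern for attaching driver-private data to atomic state: embed drm_plane_state in a driver structure, then provide reset/duplicate/destroy hooks that manage the wrapper. A minimal sketch of the same pattern for a hypothetical driver; the foo_* names are illustrative and not part of this patch.

    #include <linux/slab.h>
    #include <drm/drmP.h>
    #include <drm/drm_atomic.h>

    struct foo_plane_state {
            struct drm_plane_state base;    /* embedded core state */
            unsigned int level;             /* driver-private value */
    };

    static inline struct foo_plane_state *
    to_foo_plane_state(struct drm_plane_state *state)
    {
            return container_of(state, struct foo_plane_state, base);
    }

    static struct drm_plane_state *
    foo_plane_duplicate_state(struct drm_plane *plane)
    {
            struct foo_plane_state *copy;

            /* Copy the whole wrapper so private fields travel with the state. */
            copy = kmemdup(to_foo_plane_state(plane->state), sizeof(*copy),
                           GFP_KERNEL);
            if (copy == NULL)
                    return NULL;

            /* The duplicated state takes its own framebuffer reference. */
            if (copy->base.fb)
                    drm_framebuffer_reference(copy->base.fb);

            return &copy->base;
    }
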
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 3021288b1a89..abff0ebeb195 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -14,68 +14,57 @@
 #ifndef __RCAR_DU_PLANE_H__
 #define __RCAR_DU_PLANE_H__
 
-#include <linux/mutex.h>
-
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 
 struct rcar_du_format_info;
 struct rcar_du_group;
 
-/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
- * using KMS planes requires at least one of the CRTCs being enabled, no more
- * than 7 KMS planes can be available. We thus create 7 KMS planes and
- * 9 software planes (one for each KMS plane and one for each CRTC).
+/* The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
+ * As using overlay planes requires at least one of the CRTCs being enabled, no
+ * more than 7 overlay planes can be available. We thus create 1 primary plane
+ * per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
  */
-
-#define RCAR_DU_NUM_KMS_PLANES 7
+#define RCAR_DU_NUM_KMS_PLANES 9
 #define RCAR_DU_NUM_HW_PLANES 8
-#define RCAR_DU_NUM_SW_PLANES 9
 
 struct rcar_du_plane {
+        struct drm_plane plane;
         struct rcar_du_group *group;
-        struct drm_crtc *crtc;
-
-        bool enabled;
-
-        int hwindex;            /* 0-based, -1 means unused */
-        unsigned int alpha;
-        unsigned int colorkey;
-        unsigned int zpos;
-
-        const struct rcar_du_format_info *format;
-
-        unsigned long dma[2];
-        unsigned int pitch;
-
-        unsigned int width;
-        unsigned int height;
-
-        unsigned int src_x;
-        unsigned int src_y;
-        unsigned int dst_x;
-        unsigned int dst_y;
 };
 
+static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
+{
+        return container_of(plane, struct rcar_du_plane, plane);
+}
+
 struct rcar_du_planes {
-        struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
-        unsigned int free;
-        struct mutex lock;
+        struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES];
 
         struct drm_property *alpha;
         struct drm_property *colorkey;
         struct drm_property *zpos;
 };
 
+struct rcar_du_plane_state {
+        struct drm_plane_state state;
+
+        const struct rcar_du_format_info *format;
+        int hwindex;            /* 0-based, -1 means unused */
+
+        unsigned int alpha;
+        unsigned int colorkey;
+        unsigned int zpos;
+};
+
+static inline struct rcar_du_plane_state *
+to_rcar_du_plane_state(struct drm_plane_state *state)
+{
+        return container_of(state, struct rcar_du_plane_state, state);
+}
+
 int rcar_du_planes_init(struct rcar_du_group *rgrp);
-int rcar_du_planes_register(struct rcar_du_group *rgrp);
 
 void rcar_du_plane_setup(struct rcar_du_plane *plane);
-void rcar_du_plane_update_base(struct rcar_du_plane *plane);
-void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
-                                struct drm_framebuffer *fb);
-int rcar_du_plane_reserve(struct rcar_du_plane *plane,
-                          const struct rcar_du_format_info *format);
-void rcar_du_plane_release(struct rcar_du_plane *plane);
 
 #endif /* __RCAR_DU_PLANE_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 9d4879921cc7..e0a5d8f93963 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -12,6 +12,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 
@@ -43,10 +44,13 @@ rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
 }
 
 static const struct drm_connector_funcs connector_funcs = {
-        .dpms = drm_helper_connector_dpms,
+        .dpms = drm_atomic_helper_connector_dpms,
+        .reset = drm_atomic_helper_connector_reset,
         .detect = rcar_du_vga_connector_detect,
         .fill_modes = drm_helper_probe_single_connector_modes,
         .destroy = rcar_du_vga_connector_destroy,
+        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
@@ -76,7 +80,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
         if (ret < 0)
                 return ret;
 
-        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+        connector->dpms = DRM_MODE_DPMS_OFF;
         drm_object_property_set_value(&connector->base,
                 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
 
@@ -84,7 +88,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
         if (ret < 0)
                 return ret;
 
-        connector->encoder = encoder;
         rcon->encoder = renc;
 
         return 0;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 1a52522f5da7..b7f781573b15 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -472,13 +472,15 @@ static const struct drm_plane_funcs tegra_primary_plane_funcs = {
 };
 
 static int tegra_plane_prepare_fb(struct drm_plane *plane,
-                                  struct drm_framebuffer *fb)
+                                  struct drm_framebuffer *fb,
+                                  const struct drm_plane_state *new_state)
 {
         return 0;
 }
 
 static void tegra_plane_cleanup_fb(struct drm_plane *plane,
-                                   struct drm_framebuffer *fb)
+                                   struct drm_framebuffer *fb,
+                                   const struct drm_plane_state *old_fb)
 {
 }
 
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 7dd328d77996..5f1880766110 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -55,9 +55,9 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
          * current layout.
          */
 
-        drm_atomic_helper_commit_pre_planes(drm, state);
+        drm_atomic_helper_commit_modeset_disables(drm, state);
         drm_atomic_helper_commit_planes(drm, state);
-        drm_atomic_helper_commit_post_planes(drm, state);
+        drm_atomic_helper_commit_modeset_enables(drm, state);
 
         drm_atomic_helper_wait_for_vblanks(drm, state);
 
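
The rename only touches the helper names; the call sequence a driver uses stays the same. A sketch of a synchronous commit function built on the renamed helpers (the example_* name is illustrative; tegra_atomic_complete() above is the real-world equivalent):

    #include <drm/drmP.h>
    #include <drm/drm_atomic_helper.h>

    static void example_atomic_complete(struct drm_device *drm,
                                        struct drm_atomic_state *state)
    {
            /* Shut down CRTCs/encoders that are turned off or re-modeset. */
            drm_atomic_helper_commit_modeset_disables(drm, state);

            /* Program the new plane configuration. */
            drm_atomic_helper_commit_planes(drm, state);

            /* Light up CRTCs/encoders with their new modes. */
            drm_atomic_helper_commit_modeset_enables(drm, state);

            /* Block until the update has hit the hardware. */
            drm_atomic_helper_wait_for_vblanks(drm, state);

            drm_atomic_helper_cleanup_planes(drm, state);
            drm_atomic_state_free(state);
    }
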
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e928625a9da0..63c0b0131f61 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,9 @@ struct dma_buf_attachment;
  * PRIME: used in the prime code.
  *        This is the category used by the DRM_DEBUG_PRIME() macro.
  *
+ * ATOMIC: used in the atomic code.
+ *         This is the category used by the DRM_DEBUG_ATOMIC() macro.
+ *
  * Enabling verbose debug messages is done through the drm.debug parameter,
  * each category being enabled by a bit.
  *
@@ -121,6 +124,7 @@ struct dma_buf_attachment;
 #define DRM_UT_DRIVER 0x02
 #define DRM_UT_KMS    0x04
 #define DRM_UT_PRIME  0x08
+#define DRM_UT_ATOMIC 0x10
 
 extern __printf(2, 3)
 void drm_ut_debug_printk(const char *function_name,
@@ -207,6 +211,11 @@ void drm_err(const char *format, ...);
                 if (unlikely(drm_debug & DRM_UT_PRIME))          \
                         drm_ut_debug_printk(__func__, fmt, ##args); \
         } while (0)
+#define DRM_DEBUG_ATOMIC(fmt, args...)                           \
+        do {                                                     \
+                if (unlikely(drm_debug & DRM_UT_ATOMIC))         \
+                        drm_ut_debug_printk(__func__, fmt, ##args); \
+        } while (0)
 
 /*@}*/
 
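
The new macro is used like the existing per-category helpers; the message below is illustrative. Output appears only when the DRM_UT_ATOMIC bit is set, for example by booting with drm.debug=0x10 or writing 0x10 to /sys/module/drm/parameters/debug.

    #include <drm/drmP.h>
    #include <drm/drm_atomic.h>

    static void example_log_commit(struct drm_atomic_state *state)
    {
            /* Filtered by the ATOMIC category, independent of KMS/PRIME. */
            DRM_DEBUG_ATOMIC("committing state %p\n", state);
    }
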
@@ -922,6 +931,7 @@ extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
 extern void drm_vblank_on(struct drm_device *dev, int crtc);
 extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
 
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 8039d54a7441..829280b56874 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -43,9 +43,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                                         struct drm_atomic_state *old_state);
 
-void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
                                          struct drm_atomic_state *state);
-void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
                                           struct drm_atomic_state *old_state);
 
 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 920e21a8f3fd..b1465d6fbe94 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -202,6 +202,7 @@ struct drm_framebuffer {
         const struct drm_framebuffer_funcs *funcs;
         unsigned int pitches[4];
         unsigned int offsets[4];
+        uint64_t modifier[4];
         unsigned int width;
         unsigned int height;
         /* depth can be 15 or 16 */
@@ -1155,6 +1156,9 @@ struct drm_mode_config {
         /* whether async page flip is supported or not */
         bool async_page_flip;
 
+        /* whether the driver supports fb modifiers */
+        bool allow_fb_modifiers;
+
         /* cursor size */
         uint32_t cursor_width, cursor_height;
 };
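
Only drivers that can actually decode modifier[] should opt in; the addfb2 path is expected to reject DRM_MODE_FB_MODIFIERS otherwise. A sketch of the opt-in during mode-config setup (foo_mode_config_init is illustrative):

    #include <drm/drmP.h>
    #include <drm/drm_crtc.h>

    static void foo_mode_config_init(struct drm_device *dev)
    {
            drm_mode_config_init(dev);

            /* Accept modifier-aware ADDFB2; userspace sees this as
             * DRM_CAP_ADDFB2_MODIFIERS.
             */
            dev->mode_config.allow_fb_modifiers = true;
    }
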
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c250a22b39ab..92d5135b55d2 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -89,6 +89,7 @@ struct drm_crtc_helper_funcs {
         int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
                         struct drm_display_mode *adjusted_mode, int x, int y,
                         struct drm_framebuffer *old_fb);
+        /* Actually set the mode for atomic helpers, optional */
         void (*mode_set_nofb)(struct drm_crtc *crtc);
 
         /* Move the crtc on the current fb to the given position *optional* */
@@ -119,7 +120,7 @@ struct drm_crtc_helper_funcs {
  * @mode_fixup: try to fixup proposed mode for this connector
  * @prepare: part of the disable sequence, called before the CRTC modeset
  * @commit: called after the CRTC modeset
- * @mode_set: set this mode
+ * @mode_set: set this mode, optional for atomic helpers
  * @get_crtc: return CRTC that the encoder is currently attached to
  * @detect: connection status detection
  * @disable: disable encoder when not in use (overrides DPMS off)
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 7e25030a6aa2..d4803224028f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -92,6 +92,9 @@
 # define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
 # define DP_OUI_SUPPORT (1 << 7)
 
+#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
+#define DP_MAX_SUPPORTED_RATES 0x8
+
 #define DP_I2C_SPEED_CAP 0x00c /* DPI */
 # define DP_I2C_SPEED_1K 0x01
 # define DP_I2C_SPEED_5K 0x02
@@ -101,6 +104,7 @@
 # define DP_I2C_SPEED_1M 0x20
 
 #define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
+# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* eDP v1.2 or higher */
 #define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
 
 /* Multiple stream transport */
@@ -221,6 +225,8 @@
 # define DP_UP_REQ_EN (1 << 1)
 # define DP_UPSTREAM_IS_SRC (1 << 2)
 
+#define DP_LINK_RATE_SET 0x115
+
 #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
 # define DP_PSR_ENABLE (1 << 0)
 # define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
@@ -332,6 +338,8 @@
 # define DP_SET_POWER_D3 0x2
 # define DP_SET_POWER_MASK 0x3
 
+#define DP_EDP_DPCD_REV 0x700
+
 #define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
 #define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
 #define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
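
The new DPCD addresses describe the eDP 1.4 link-rate table: up to DP_MAX_SUPPORTED_RATES entries at DP_SUPPORTED_LINK_RATES, where each entry appears to be a 16-bit little-endian value in units of 200 kHz, and DP_LINK_RATE_SET selects an index into that table. A reading sketch (assumes a struct drm_dp_aux *aux; error handling trimmed):

    #include <drm/drm_dp_helper.h>

    static void example_read_edp14_rates(struct drm_dp_aux *aux)
    {
            __le16 rates[DP_MAX_SUPPORTED_RATES];
            int len, i;

            len = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
                                   rates, sizeof(rates));
            if (len < 0)
                    return;

            for (i = 0; i < len / 2; i++) {
                    /* Assumed 200 kHz units; a zero entry ends the table. */
                    u32 rate_khz = le16_to_cpu(rates[i]) * 200;

                    if (!rate_khz)
                            break;
                    /* ... record rate_khz as a candidate link rate ... */
            }
    }
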
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index d92f6dd1fb11..0616188c7801 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -92,7 +92,7 @@ enum drm_mode_status {
 #define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
 #define CRTC_NO_DBLSCAN    (1 << 2) /* don't adjust doublescan */
 #define CRTC_NO_VSCAN      (1 << 3) /* don't adjust doublescan */
-#define CRTC_STEREO_DOUBLE_ONLY (CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
+#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
 
 #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 31c11d36fae6..72ddab02ebd9 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -59,9 +59,11 @@ extern int drm_crtc_init(struct drm_device *dev,
  */
 struct drm_plane_helper_funcs {
         int (*prepare_fb)(struct drm_plane *plane,
-                          struct drm_framebuffer *fb);
+                          struct drm_framebuffer *fb,
+                          const struct drm_plane_state *new_state);
         void (*cleanup_fb)(struct drm_plane *plane,
-                           struct drm_framebuffer *fb);
+                           struct drm_framebuffer *fb,
+                           const struct drm_plane_state *old_state);
 
         int (*atomic_check)(struct drm_plane *plane,
                             struct drm_plane_state *state);
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index d016dc57f007..f2e47fd56751 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -259,21 +259,31 @@
         INTEL_VGA_DEVICE(0x22b2, info), \
         INTEL_VGA_DEVICE(0x22b3, info)
 
-#define INTEL_SKL_IDS(info) \
-        INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+#define INTEL_SKL_GT1_IDS(info) \
         INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
-        INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
-        INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
         INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+        INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+        INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+        INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
+
+#define INTEL_SKL_GT2_IDS(info) \
+        INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+        INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
         INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
         INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
-        INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
         INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
-        INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
-        INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
         INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
-        INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
-        INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
         INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
 
+#define INTEL_SKL_GT3_IDS(info) \
+        INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+        INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+        INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
+
+#define INTEL_SKL_IDS(info) \
+        INTEL_SKL_GT1_IDS(info), \
+        INTEL_SKL_GT2_IDS(info), \
+        INTEL_SKL_GT3_IDS(info)
+
 #endif /* _I915_PCIIDS_H */
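
Splitting the Skylake list by GT level lets a driver bind a different device-info structure per GT while INTEL_SKL_IDS(info) preserves the old single-info behaviour. A consumption sketch; the example_* info structures are placeholders, not the real i915 ones:

    #include <linux/pci.h>
    #include <drm/i915_pciids.h>

    struct example_device_info { int gt; };

    static const struct example_device_info example_skl_gt1_info = { .gt = 1 };
    static const struct example_device_info example_skl_gt2_info = { .gt = 2 };
    static const struct example_device_info example_skl_gt3_info = { .gt = 3 };

    static const struct pci_device_id example_pciids[] = {
            INTEL_SKL_GT1_IDS(&example_skl_gt1_info),
            INTEL_SKL_GT2_IDS(&example_skl_gt2_info),
            INTEL_SKL_GT3_IDS(&example_skl_gt3_info),
            { /* terminator */ }
    };
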
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 01b2d6d0e355..ff6ef62d084b 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -630,6 +630,7 @@ struct drm_gem_open {
  */
 #define DRM_CAP_CURSOR_WIDTH 0x8
 #define DRM_CAP_CURSOR_HEIGHT 0x9
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
 
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
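
Userspace can probe for the new capability before attempting a modifier-aware ADDFB2. A sketch using libdrm's drmGetCap() wrapper (fd is an open DRM device):

    #include <stdint.h>
    #include <xf86drm.h>

    static int supports_fb_modifiers(int fd)
    {
            uint64_t cap = 0;

            if (drmGetCap(fd, DRM_CAP_ADDFB2_MODIFIERS, &cap) != 0)
                    return 0;   /* old kernel: treat as unsupported */

            return cap != 0;
    }
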
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index a284f11a8ef5..e6efac23c7ea 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -129,4 +129,82 @@
 #define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
 #define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as the vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE           0
+#define DRM_FORMAT_MOD_VENDOR_INTEL   0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD     0x02
+#define DRM_FORMAT_MOD_VENDOR_NV      0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM    0x05
+/* add more to the end as needed */
+
+#define fourcc_mod_code(vendor, val) \
+        ((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffL))
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ */
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
+ * 2Kb) in row-major layout. Within the tile bytes are laid out row-major, with
+ * a platform-dependent stride. On top of that the memory can apply
+ * platform-dependent swizzling of some higher address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
+ * 2Kb) in row-major layout. Within the tile bytes are laid out in OWORD (16
+ * bytes) chunks column-major, with a platform-dependent height. On top of that
+ * the memory can apply platform-dependent swizzling of some higher address
+ * bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major layout.
+ * Within the tile pixels are laid out in 16 256-byte units / sub-tiles which
+ * are arranged in four groups (two wide, two high) with column-major layout.
+ * Each group therefore consists of four 256-byte units, which are also laid
+ * out as 2x2 column-major.
+ * 256-byte units are made out of four 64-byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64-byte blocks of pixels contain four pixel rows of 16 bytes, where the
+ * width in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
 #endif /* DRM_FOURCC_H */
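
Since fourcc_mod_code() packs the vendor id into the top byte, a token decomposes the same way. A small sketch; the EXAMPLE_* helper macros are illustrative and not part of the header:

    #include <stdint.h>

    /* Illustrative helpers mirroring the encoding of fourcc_mod_code(). */
    #define EXAMPLE_MOD_VENDOR(m) ((uint8_t)((uint64_t)(m) >> 56))
    #define EXAMPLE_MOD_DETAIL(m) ((uint64_t)(m) & 0x00ffffffffffffffULL)

    /* For I915_FORMAT_MOD_X_TILED == fourcc_mod_code(INTEL, 1):
     *   EXAMPLE_MOD_VENDOR() yields DRM_FORMAT_MOD_VENDOR_INTEL (0x01)
     *   EXAMPLE_MOD_DETAIL() yields 1
     */
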
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index ca788e01dab2..dbeba949462a 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -336,6 +336,7 @@ struct drm_mode_fb_cmd {
 };
 
 #define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS  (1<<1) /* enables ->modifier[] */
 
 struct drm_mode_fb_cmd2 {
         __u32 fb_id;
@@ -356,10 +357,18 @@ struct drm_mode_fb_cmd2 {
          * So it would consist of Y as offsets[0] and UV as
          * offsets[1]. Note that offsets[0] will generally
          * be 0 (but this is not required).
+         *
+         * To accommodate tiled, compressed, etc. formats, a per-plane
+         * vendor-specific modifier token can be specified. The default
+         * value of zero indicates the "native" layout as specified by
+         * the fourcc. This allows, for example, different tiling/swizzling
+         * patterns on different planes. See the discussion of
+         * DRM_FORMAT_MOD_xxx above.
          */
         __u32 handles[4];
         __u32 pitches[4]; /* pitch for each plane */
         __u32 offsets[4]; /* offset of each plane */
+        __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
 };
 
 #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
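
Putting the pieces together, a userspace sketch that creates a tiled framebuffer through the extended ioctl; fd, handle, width, height and pitch are assumed to be set up already, error handling is trimmed, and include paths may vary with the libdrm installation:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/drm_fourcc.h>

    static uint32_t example_addfb2_tiled(int fd, uint32_t handle,
                                         uint32_t width, uint32_t height,
                                         uint32_t pitch)
    {
            struct drm_mode_fb_cmd2 cmd;

            memset(&cmd, 0, sizeof(cmd));
            cmd.width = width;
            cmd.height = height;
            cmd.pixel_format = DRM_FORMAT_XRGB8888;
            cmd.flags = DRM_MODE_FB_MODIFIERS;      /* enables modifier[] */
            cmd.handles[0] = handle;
            cmd.pitches[0] = pitch;
            cmd.modifier[0] = I915_FORMAT_MOD_X_TILED;

            if (drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd) != 0)
                    return 0;   /* creation failed */

            return cmd.fb_id;
    }
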